]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.2.2-3.0.4-201108301903.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-3.0.4-201108301903.patch
CommitLineData
95507136
PK
1diff -urNp linux-3.0.4/arch/alpha/include/asm/elf.h linux-3.0.4/arch/alpha/include/asm/elf.h
2--- linux-3.0.4/arch/alpha/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
3+++ linux-3.0.4/arch/alpha/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
4@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8+#ifdef CONFIG_PAX_ASLR
9+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10+
11+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13+#endif
14+
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18diff -urNp linux-3.0.4/arch/alpha/include/asm/pgtable.h linux-3.0.4/arch/alpha/include/asm/pgtable.h
19--- linux-3.0.4/arch/alpha/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
20+++ linux-3.0.4/arch/alpha/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
21@@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25+
26+#ifdef CONFIG_PAX_PAGEEXEC
27+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30+#else
31+# define PAGE_SHARED_NOEXEC PAGE_SHARED
32+# define PAGE_COPY_NOEXEC PAGE_COPY
33+# define PAGE_READONLY_NOEXEC PAGE_READONLY
34+#endif
35+
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39diff -urNp linux-3.0.4/arch/alpha/kernel/module.c linux-3.0.4/arch/alpha/kernel/module.c
40--- linux-3.0.4/arch/alpha/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
41+++ linux-3.0.4/arch/alpha/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
42@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46- gp = (u64)me->module_core + me->core_size - 0x8000;
47+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51diff -urNp linux-3.0.4/arch/alpha/kernel/osf_sys.c linux-3.0.4/arch/alpha/kernel/osf_sys.c
52--- linux-3.0.4/arch/alpha/kernel/osf_sys.c 2011-07-21 22:17:23.000000000 -0400
53+++ linux-3.0.4/arch/alpha/kernel/osf_sys.c 2011-08-23 21:47:55.000000000 -0400
54@@ -1145,7 +1145,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58- if (!vma || addr + len <= vma->vm_start)
59+ if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63@@ -1181,6 +1181,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67+#ifdef CONFIG_PAX_RANDMMAP
68+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69+#endif
70+
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74@@ -1188,8 +1192,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79- len, limit);
80+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81+
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85diff -urNp linux-3.0.4/arch/alpha/mm/fault.c linux-3.0.4/arch/alpha/mm/fault.c
86--- linux-3.0.4/arch/alpha/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
87+++ linux-3.0.4/arch/alpha/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
88@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92+#ifdef CONFIG_PAX_PAGEEXEC
93+/*
94+ * PaX: decide what to do with offenders (regs->pc = fault address)
95+ *
96+ * returns 1 when task should be killed
97+ * 2 when patched PLT trampoline was detected
98+ * 3 when unpatched PLT trampoline was detected
99+ */
100+static int pax_handle_fetch_fault(struct pt_regs *regs)
101+{
102+
103+#ifdef CONFIG_PAX_EMUPLT
104+ int err;
105+
106+ do { /* PaX: patched PLT emulation #1 */
107+ unsigned int ldah, ldq, jmp;
108+
109+ err = get_user(ldah, (unsigned int *)regs->pc);
110+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112+
113+ if (err)
114+ break;
115+
116+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118+ jmp == 0x6BFB0000U)
119+ {
120+ unsigned long r27, addr;
121+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123+
124+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125+ err = get_user(r27, (unsigned long *)addr);
126+ if (err)
127+ break;
128+
129+ regs->r27 = r27;
130+ regs->pc = r27;
131+ return 2;
132+ }
133+ } while (0);
134+
135+ do { /* PaX: patched PLT emulation #2 */
136+ unsigned int ldah, lda, br;
137+
138+ err = get_user(ldah, (unsigned int *)regs->pc);
139+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
140+ err |= get_user(br, (unsigned int *)(regs->pc+8));
141+
142+ if (err)
143+ break;
144+
145+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
147+ (br & 0xFFE00000U) == 0xC3E00000U)
148+ {
149+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152+
153+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155+ return 2;
156+ }
157+ } while (0);
158+
159+ do { /* PaX: unpatched PLT emulation */
160+ unsigned int br;
161+
162+ err = get_user(br, (unsigned int *)regs->pc);
163+
164+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165+ unsigned int br2, ldq, nop, jmp;
166+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167+
168+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169+ err = get_user(br2, (unsigned int *)addr);
170+ err |= get_user(ldq, (unsigned int *)(addr+4));
171+ err |= get_user(nop, (unsigned int *)(addr+8));
172+ err |= get_user(jmp, (unsigned int *)(addr+12));
173+ err |= get_user(resolver, (unsigned long *)(addr+16));
174+
175+ if (err)
176+ break;
177+
178+ if (br2 == 0xC3600000U &&
179+ ldq == 0xA77B000CU &&
180+ nop == 0x47FF041FU &&
181+ jmp == 0x6B7B0000U)
182+ {
183+ regs->r28 = regs->pc+4;
184+ regs->r27 = addr+16;
185+ regs->pc = resolver;
186+ return 3;
187+ }
188+ }
189+ } while (0);
190+#endif
191+
192+ return 1;
193+}
194+
195+void pax_report_insns(void *pc, void *sp)
196+{
197+ unsigned long i;
198+
199+ printk(KERN_ERR "PAX: bytes at PC: ");
200+ for (i = 0; i < 5; i++) {
201+ unsigned int c;
202+ if (get_user(c, (unsigned int *)pc+i))
203+ printk(KERN_CONT "???????? ");
204+ else
205+ printk(KERN_CONT "%08x ", c);
206+ }
207+ printk("\n");
208+}
209+#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217- if (!(vma->vm_flags & VM_EXEC))
218+ if (!(vma->vm_flags & VM_EXEC)) {
219+
220+#ifdef CONFIG_PAX_PAGEEXEC
221+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222+ goto bad_area;
223+
224+ up_read(&mm->mmap_sem);
225+ switch (pax_handle_fetch_fault(regs)) {
226+
227+#ifdef CONFIG_PAX_EMUPLT
228+ case 2:
229+ case 3:
230+ return;
231+#endif
232+
233+ }
234+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235+ do_group_exit(SIGKILL);
236+#else
237 goto bad_area;
238+#endif
239+
240+ }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244diff -urNp linux-3.0.4/arch/arm/include/asm/elf.h linux-3.0.4/arch/arm/include/asm/elf.h
245--- linux-3.0.4/arch/arm/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
246+++ linux-3.0.4/arch/arm/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
247@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253+
254+#ifdef CONFIG_PAX_ASLR
255+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256+
257+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259+#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t
264 extern void elf_set_personality(const struct elf32_hdr *);
265 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
266
267-struct mm_struct;
268-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
269-#define arch_randomize_brk arch_randomize_brk
270-
271 extern int vectors_user_mapping(void);
272 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
273 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
274diff -urNp linux-3.0.4/arch/arm/include/asm/kmap_types.h linux-3.0.4/arch/arm/include/asm/kmap_types.h
275--- linux-3.0.4/arch/arm/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
276+++ linux-3.0.4/arch/arm/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
277@@ -21,6 +21,7 @@ enum km_type {
278 KM_L1_CACHE,
279 KM_L2_CACHE,
280 KM_KDB,
281+ KM_CLEARPAGE,
282 KM_TYPE_NR
283 };
284
285diff -urNp linux-3.0.4/arch/arm/include/asm/uaccess.h linux-3.0.4/arch/arm/include/asm/uaccess.h
286--- linux-3.0.4/arch/arm/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
287+++ linux-3.0.4/arch/arm/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
288@@ -22,6 +22,8 @@
289 #define VERIFY_READ 0
290 #define VERIFY_WRITE 1
291
292+extern void check_object_size(const void *ptr, unsigned long n, bool to);
293+
294 /*
295 * The exception table consists of pairs of addresses: the first is the
296 * address of an instruction that is allowed to fault, and the second is
297@@ -387,8 +389,23 @@ do { \
298
299
300 #ifdef CONFIG_MMU
301-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
302-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
303+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
304+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
305+
306+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
307+{
308+ if (!__builtin_constant_p(n))
309+ check_object_size(to, n, false);
310+ return ___copy_from_user(to, from, n);
311+}
312+
313+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
314+{
315+ if (!__builtin_constant_p(n))
316+ check_object_size(from, n, true);
317+ return ___copy_to_user(to, from, n);
318+}
319+
320 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
321 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
322 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
323@@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
324
325 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
326 {
327+ if ((long)n < 0)
328+ return n;
329+
330 if (access_ok(VERIFY_READ, from, n))
331 n = __copy_from_user(to, from, n);
332 else /* security hole - plug it */
333@@ -412,6 +432,9 @@ static inline unsigned long __must_check
334
335 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
336 {
337+ if ((long)n < 0)
338+ return n;
339+
340 if (access_ok(VERIFY_WRITE, to, n))
341 n = __copy_to_user(to, from, n);
342 return n;
343diff -urNp linux-3.0.4/arch/arm/kernel/armksyms.c linux-3.0.4/arch/arm/kernel/armksyms.c
344--- linux-3.0.4/arch/arm/kernel/armksyms.c 2011-07-21 22:17:23.000000000 -0400
345+++ linux-3.0.4/arch/arm/kernel/armksyms.c 2011-08-23 21:47:55.000000000 -0400
346@@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
347 #ifdef CONFIG_MMU
348 EXPORT_SYMBOL(copy_page);
349
350-EXPORT_SYMBOL(__copy_from_user);
351-EXPORT_SYMBOL(__copy_to_user);
352+EXPORT_SYMBOL(___copy_from_user);
353+EXPORT_SYMBOL(___copy_to_user);
354 EXPORT_SYMBOL(__clear_user);
355
356 EXPORT_SYMBOL(__get_user_1);
357diff -urNp linux-3.0.4/arch/arm/kernel/process.c linux-3.0.4/arch/arm/kernel/process.c
358--- linux-3.0.4/arch/arm/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
359+++ linux-3.0.4/arch/arm/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
360@@ -28,7 +28,6 @@
361 #include <linux/tick.h>
362 #include <linux/utsname.h>
363 #include <linux/uaccess.h>
364-#include <linux/random.h>
365 #include <linux/hw_breakpoint.h>
366
367 #include <asm/cacheflush.h>
368@@ -479,12 +478,6 @@ unsigned long get_wchan(struct task_stru
369 return 0;
370 }
371
372-unsigned long arch_randomize_brk(struct mm_struct *mm)
373-{
374- unsigned long range_end = mm->brk + 0x02000000;
375- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
376-}
377-
378 #ifdef CONFIG_MMU
379 /*
380 * The vectors page is always readable from user space for the
381diff -urNp linux-3.0.4/arch/arm/kernel/traps.c linux-3.0.4/arch/arm/kernel/traps.c
382--- linux-3.0.4/arch/arm/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
383+++ linux-3.0.4/arch/arm/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
384@@ -257,6 +257,8 @@ static int __die(const char *str, int er
385
386 static DEFINE_SPINLOCK(die_lock);
387
388+extern void gr_handle_kernel_exploit(void);
389+
390 /*
391 * This function is protected against re-entrancy.
392 */
393@@ -284,6 +286,9 @@ void die(const char *str, struct pt_regs
394 panic("Fatal exception in interrupt");
395 if (panic_on_oops)
396 panic("Fatal exception");
397+
398+ gr_handle_kernel_exploit();
399+
400 if (ret != NOTIFY_STOP)
401 do_exit(SIGSEGV);
402 }
403diff -urNp linux-3.0.4/arch/arm/lib/copy_from_user.S linux-3.0.4/arch/arm/lib/copy_from_user.S
404--- linux-3.0.4/arch/arm/lib/copy_from_user.S 2011-07-21 22:17:23.000000000 -0400
405+++ linux-3.0.4/arch/arm/lib/copy_from_user.S 2011-08-23 21:47:55.000000000 -0400
406@@ -16,7 +16,7 @@
407 /*
408 * Prototype:
409 *
410- * size_t __copy_from_user(void *to, const void *from, size_t n)
411+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
412 *
413 * Purpose:
414 *
415@@ -84,11 +84,11 @@
416
417 .text
418
419-ENTRY(__copy_from_user)
420+ENTRY(___copy_from_user)
421
422 #include "copy_template.S"
423
424-ENDPROC(__copy_from_user)
425+ENDPROC(___copy_from_user)
426
427 .pushsection .fixup,"ax"
428 .align 0
429diff -urNp linux-3.0.4/arch/arm/lib/copy_to_user.S linux-3.0.4/arch/arm/lib/copy_to_user.S
430--- linux-3.0.4/arch/arm/lib/copy_to_user.S 2011-07-21 22:17:23.000000000 -0400
431+++ linux-3.0.4/arch/arm/lib/copy_to_user.S 2011-08-23 21:47:55.000000000 -0400
432@@ -16,7 +16,7 @@
433 /*
434 * Prototype:
435 *
436- * size_t __copy_to_user(void *to, const void *from, size_t n)
437+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
438 *
439 * Purpose:
440 *
441@@ -88,11 +88,11 @@
442 .text
443
444 ENTRY(__copy_to_user_std)
445-WEAK(__copy_to_user)
446+WEAK(___copy_to_user)
447
448 #include "copy_template.S"
449
450-ENDPROC(__copy_to_user)
451+ENDPROC(___copy_to_user)
452 ENDPROC(__copy_to_user_std)
453
454 .pushsection .fixup,"ax"
455diff -urNp linux-3.0.4/arch/arm/lib/uaccess.S linux-3.0.4/arch/arm/lib/uaccess.S
456--- linux-3.0.4/arch/arm/lib/uaccess.S 2011-07-21 22:17:23.000000000 -0400
457+++ linux-3.0.4/arch/arm/lib/uaccess.S 2011-08-23 21:47:55.000000000 -0400
458@@ -20,7 +20,7 @@
459
460 #define PAGE_SHIFT 12
461
462-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
463+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
464 * Purpose : copy a block to user memory from kernel memory
465 * Params : to - user memory
466 * : from - kernel memory
467@@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May f
468 sub r2, r2, ip
469 b .Lc2u_dest_aligned
470
471-ENTRY(__copy_to_user)
472+ENTRY(___copy_to_user)
473 stmfd sp!, {r2, r4 - r7, lr}
474 cmp r2, #4
475 blt .Lc2u_not_enough
476@@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May f
477 ldrgtb r3, [r1], #0
478 USER( T(strgtb) r3, [r0], #1) @ May fault
479 b .Lc2u_finished
480-ENDPROC(__copy_to_user)
481+ENDPROC(___copy_to_user)
482
483 .pushsection .fixup,"ax"
484 .align 0
485 9001: ldmfd sp!, {r0, r4 - r7, pc}
486 .popsection
487
488-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
489+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
490 * Purpose : copy a block from user memory to kernel memory
491 * Params : to - kernel memory
492 * : from - user memory
493@@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May f
494 sub r2, r2, ip
495 b .Lcfu_dest_aligned
496
497-ENTRY(__copy_from_user)
498+ENTRY(___copy_from_user)
499 stmfd sp!, {r0, r2, r4 - r7, lr}
500 cmp r2, #4
501 blt .Lcfu_not_enough
502@@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May f
503 USER( T(ldrgtb) r3, [r1], #1) @ May fault
504 strgtb r3, [r0], #1
505 b .Lcfu_finished
506-ENDPROC(__copy_from_user)
507+ENDPROC(___copy_from_user)
508
509 .pushsection .fixup,"ax"
510 .align 0
511diff -urNp linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c
512--- linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c 2011-07-21 22:17:23.000000000 -0400
513+++ linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c 2011-08-23 21:47:55.000000000 -0400
514@@ -103,7 +103,7 @@ out:
515 }
516
517 unsigned long
518-__copy_to_user(void __user *to, const void *from, unsigned long n)
519+___copy_to_user(void __user *to, const void *from, unsigned long n)
520 {
521 /*
522 * This test is stubbed out of the main function above to keep
523diff -urNp linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c
524--- linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c 2011-07-21 22:17:23.000000000 -0400
525+++ linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c 2011-08-23 21:48:14.000000000 -0400
526@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct dev
527 return sprintf(buf, "0x%X\n", mbox_value);
528 }
529
530-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
531+static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
532
533 static int mbox_show(struct seq_file *s, void *data)
534 {
535diff -urNp linux-3.0.4/arch/arm/mm/fault.c linux-3.0.4/arch/arm/mm/fault.c
536--- linux-3.0.4/arch/arm/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
537+++ linux-3.0.4/arch/arm/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
538@@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk,
539 }
540 #endif
541
542+#ifdef CONFIG_PAX_PAGEEXEC
543+ if (fsr & FSR_LNX_PF) {
544+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
545+ do_group_exit(SIGKILL);
546+ }
547+#endif
548+
549 tsk->thread.address = addr;
550 tsk->thread.error_code = fsr;
551 tsk->thread.trap_no = 14;
552@@ -379,6 +386,33 @@ do_page_fault(unsigned long addr, unsign
553 }
554 #endif /* CONFIG_MMU */
555
556+#ifdef CONFIG_PAX_PAGEEXEC
557+void pax_report_insns(void *pc, void *sp)
558+{
559+ long i;
560+
561+ printk(KERN_ERR "PAX: bytes at PC: ");
562+ for (i = 0; i < 20; i++) {
563+ unsigned char c;
564+ if (get_user(c, (__force unsigned char __user *)pc+i))
565+ printk(KERN_CONT "?? ");
566+ else
567+ printk(KERN_CONT "%02x ", c);
568+ }
569+ printk("\n");
570+
571+ printk(KERN_ERR "PAX: bytes at SP-4: ");
572+ for (i = -1; i < 20; i++) {
573+ unsigned long c;
574+ if (get_user(c, (__force unsigned long __user *)sp+i))
575+ printk(KERN_CONT "???????? ");
576+ else
577+ printk(KERN_CONT "%08lx ", c);
578+ }
579+ printk("\n");
580+}
581+#endif
582+
583 /*
584 * First Level Translation Fault Handler
585 *
586diff -urNp linux-3.0.4/arch/arm/mm/mmap.c linux-3.0.4/arch/arm/mm/mmap.c
587--- linux-3.0.4/arch/arm/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
588+++ linux-3.0.4/arch/arm/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
589@@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp
590 if (len > TASK_SIZE)
591 return -ENOMEM;
592
593+#ifdef CONFIG_PAX_RANDMMAP
594+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
595+#endif
596+
597 if (addr) {
598 if (do_align)
599 addr = COLOUR_ALIGN(addr, pgoff);
600@@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp
601 addr = PAGE_ALIGN(addr);
602
603 vma = find_vma(mm, addr);
604- if (TASK_SIZE - len >= addr &&
605- (!vma || addr + len <= vma->vm_start))
606+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
607 return addr;
608 }
609 if (len > mm->cached_hole_size) {
610- start_addr = addr = mm->free_area_cache;
611+ start_addr = addr = mm->free_area_cache;
612 } else {
613- start_addr = addr = TASK_UNMAPPED_BASE;
614- mm->cached_hole_size = 0;
615+ start_addr = addr = mm->mmap_base;
616+ mm->cached_hole_size = 0;
617 }
618 /* 8 bits of randomness in 20 address space bits */
619 if ((current->flags & PF_RANDOMIZE) &&
620@@ -100,14 +103,14 @@ full_search:
621 * Start a new search - just in case we missed
622 * some holes.
623 */
624- if (start_addr != TASK_UNMAPPED_BASE) {
625- start_addr = addr = TASK_UNMAPPED_BASE;
626+ if (start_addr != mm->mmap_base) {
627+ start_addr = addr = mm->mmap_base;
628 mm->cached_hole_size = 0;
629 goto full_search;
630 }
631 return -ENOMEM;
632 }
633- if (!vma || addr + len <= vma->vm_start) {
634+ if (check_heap_stack_gap(vma, addr, len)) {
635 /*
636 * Remember the place where we stopped the search:
637 */
638diff -urNp linux-3.0.4/arch/avr32/include/asm/elf.h linux-3.0.4/arch/avr32/include/asm/elf.h
639--- linux-3.0.4/arch/avr32/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
640+++ linux-3.0.4/arch/avr32/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
641@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
642 the loader. We need to make sure that it is out of the way of the program
643 that it will "exec", and that there is sufficient room for the brk. */
644
645-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
646+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
647
648+#ifdef CONFIG_PAX_ASLR
649+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
650+
651+#define PAX_DELTA_MMAP_LEN 15
652+#define PAX_DELTA_STACK_LEN 15
653+#endif
654
655 /* This yields a mask that user programs can use to figure out what
656 instruction set this CPU supports. This could be done in user space,
657diff -urNp linux-3.0.4/arch/avr32/include/asm/kmap_types.h linux-3.0.4/arch/avr32/include/asm/kmap_types.h
658--- linux-3.0.4/arch/avr32/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
659+++ linux-3.0.4/arch/avr32/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
660@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
661 D(11) KM_IRQ1,
662 D(12) KM_SOFTIRQ0,
663 D(13) KM_SOFTIRQ1,
664-D(14) KM_TYPE_NR
665+D(14) KM_CLEARPAGE,
666+D(15) KM_TYPE_NR
667 };
668
669 #undef D
670diff -urNp linux-3.0.4/arch/avr32/mm/fault.c linux-3.0.4/arch/avr32/mm/fault.c
671--- linux-3.0.4/arch/avr32/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
672+++ linux-3.0.4/arch/avr32/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
673@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
674
675 int exception_trace = 1;
676
677+#ifdef CONFIG_PAX_PAGEEXEC
678+void pax_report_insns(void *pc, void *sp)
679+{
680+ unsigned long i;
681+
682+ printk(KERN_ERR "PAX: bytes at PC: ");
683+ for (i = 0; i < 20; i++) {
684+ unsigned char c;
685+ if (get_user(c, (unsigned char *)pc+i))
686+ printk(KERN_CONT "???????? ");
687+ else
688+ printk(KERN_CONT "%02x ", c);
689+ }
690+ printk("\n");
691+}
692+#endif
693+
694 /*
695 * This routine handles page faults. It determines the address and the
696 * problem, and then passes it off to one of the appropriate routines.
697@@ -156,6 +173,16 @@ bad_area:
698 up_read(&mm->mmap_sem);
699
700 if (user_mode(regs)) {
701+
702+#ifdef CONFIG_PAX_PAGEEXEC
703+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
704+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
705+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
706+ do_group_exit(SIGKILL);
707+ }
708+ }
709+#endif
710+
711 if (exception_trace && printk_ratelimit())
712 printk("%s%s[%d]: segfault at %08lx pc %08lx "
713 "sp %08lx ecr %lu\n",
714diff -urNp linux-3.0.4/arch/frv/include/asm/kmap_types.h linux-3.0.4/arch/frv/include/asm/kmap_types.h
715--- linux-3.0.4/arch/frv/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
716+++ linux-3.0.4/arch/frv/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
717@@ -23,6 +23,7 @@ enum km_type {
718 KM_IRQ1,
719 KM_SOFTIRQ0,
720 KM_SOFTIRQ1,
721+ KM_CLEARPAGE,
722 KM_TYPE_NR
723 };
724
725diff -urNp linux-3.0.4/arch/frv/mm/elf-fdpic.c linux-3.0.4/arch/frv/mm/elf-fdpic.c
726--- linux-3.0.4/arch/frv/mm/elf-fdpic.c 2011-07-21 22:17:23.000000000 -0400
727+++ linux-3.0.4/arch/frv/mm/elf-fdpic.c 2011-08-23 21:47:55.000000000 -0400
728@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
729 if (addr) {
730 addr = PAGE_ALIGN(addr);
731 vma = find_vma(current->mm, addr);
732- if (TASK_SIZE - len >= addr &&
733- (!vma || addr + len <= vma->vm_start))
734+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
735 goto success;
736 }
737
738@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
739 for (; vma; vma = vma->vm_next) {
740 if (addr > limit)
741 break;
742- if (addr + len <= vma->vm_start)
743+ if (check_heap_stack_gap(vma, addr, len))
744 goto success;
745 addr = vma->vm_end;
746 }
747@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
748 for (; vma; vma = vma->vm_next) {
749 if (addr > limit)
750 break;
751- if (addr + len <= vma->vm_start)
752+ if (check_heap_stack_gap(vma, addr, len))
753 goto success;
754 addr = vma->vm_end;
755 }
756diff -urNp linux-3.0.4/arch/ia64/include/asm/elf.h linux-3.0.4/arch/ia64/include/asm/elf.h
757--- linux-3.0.4/arch/ia64/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
758+++ linux-3.0.4/arch/ia64/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
759@@ -42,6 +42,13 @@
760 */
761 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
762
763+#ifdef CONFIG_PAX_ASLR
764+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
765+
766+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
767+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
768+#endif
769+
770 #define PT_IA_64_UNWIND 0x70000001
771
772 /* IA-64 relocations: */
773diff -urNp linux-3.0.4/arch/ia64/include/asm/pgtable.h linux-3.0.4/arch/ia64/include/asm/pgtable.h
774--- linux-3.0.4/arch/ia64/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
775+++ linux-3.0.4/arch/ia64/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
776@@ -12,7 +12,7 @@
777 * David Mosberger-Tang <davidm@hpl.hp.com>
778 */
779
780-
781+#include <linux/const.h>
782 #include <asm/mman.h>
783 #include <asm/page.h>
784 #include <asm/processor.h>
785@@ -143,6 +143,17 @@
786 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
787 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
788 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
789+
790+#ifdef CONFIG_PAX_PAGEEXEC
791+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
792+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
793+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
794+#else
795+# define PAGE_SHARED_NOEXEC PAGE_SHARED
796+# define PAGE_READONLY_NOEXEC PAGE_READONLY
797+# define PAGE_COPY_NOEXEC PAGE_COPY
798+#endif
799+
800 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
801 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
802 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
803diff -urNp linux-3.0.4/arch/ia64/include/asm/spinlock.h linux-3.0.4/arch/ia64/include/asm/spinlock.h
804--- linux-3.0.4/arch/ia64/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
805+++ linux-3.0.4/arch/ia64/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
806@@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
807 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
808
809 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
810- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
811+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
812 }
813
814 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
815diff -urNp linux-3.0.4/arch/ia64/include/asm/uaccess.h linux-3.0.4/arch/ia64/include/asm/uaccess.h
816--- linux-3.0.4/arch/ia64/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
817+++ linux-3.0.4/arch/ia64/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
818@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
819 const void *__cu_from = (from); \
820 long __cu_len = (n); \
821 \
822- if (__access_ok(__cu_to, __cu_len, get_fs())) \
823+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
824 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
825 __cu_len; \
826 })
827@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
828 long __cu_len = (n); \
829 \
830 __chk_user_ptr(__cu_from); \
831- if (__access_ok(__cu_from, __cu_len, get_fs())) \
832+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
833 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
834 __cu_len; \
835 })
836diff -urNp linux-3.0.4/arch/ia64/kernel/module.c linux-3.0.4/arch/ia64/kernel/module.c
837--- linux-3.0.4/arch/ia64/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
838+++ linux-3.0.4/arch/ia64/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
839@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
840 void
841 module_free (struct module *mod, void *module_region)
842 {
843- if (mod && mod->arch.init_unw_table &&
844- module_region == mod->module_init) {
845+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
846 unw_remove_unwind_table(mod->arch.init_unw_table);
847 mod->arch.init_unw_table = NULL;
848 }
849@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
850 }
851
852 static inline int
853+in_init_rx (const struct module *mod, uint64_t addr)
854+{
855+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
856+}
857+
858+static inline int
859+in_init_rw (const struct module *mod, uint64_t addr)
860+{
861+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
862+}
863+
864+static inline int
865 in_init (const struct module *mod, uint64_t addr)
866 {
867- return addr - (uint64_t) mod->module_init < mod->init_size;
868+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
869+}
870+
871+static inline int
872+in_core_rx (const struct module *mod, uint64_t addr)
873+{
874+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
875+}
876+
877+static inline int
878+in_core_rw (const struct module *mod, uint64_t addr)
879+{
880+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
881 }
882
883 static inline int
884 in_core (const struct module *mod, uint64_t addr)
885 {
886- return addr - (uint64_t) mod->module_core < mod->core_size;
887+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
888 }
889
890 static inline int
891@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
892 break;
893
894 case RV_BDREL:
895- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
896+ if (in_init_rx(mod, val))
897+ val -= (uint64_t) mod->module_init_rx;
898+ else if (in_init_rw(mod, val))
899+ val -= (uint64_t) mod->module_init_rw;
900+ else if (in_core_rx(mod, val))
901+ val -= (uint64_t) mod->module_core_rx;
902+ else if (in_core_rw(mod, val))
903+ val -= (uint64_t) mod->module_core_rw;
904 break;
905
906 case RV_LTV:
907@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
908 * addresses have been selected...
909 */
910 uint64_t gp;
911- if (mod->core_size > MAX_LTOFF)
912+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
913 /*
914 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
915 * at the end of the module.
916 */
917- gp = mod->core_size - MAX_LTOFF / 2;
918+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
919 else
920- gp = mod->core_size / 2;
921- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
922+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
923+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
924 mod->arch.gp = gp;
925 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
926 }
927diff -urNp linux-3.0.4/arch/ia64/kernel/sys_ia64.c linux-3.0.4/arch/ia64/kernel/sys_ia64.c
928--- linux-3.0.4/arch/ia64/kernel/sys_ia64.c 2011-07-21 22:17:23.000000000 -0400
929+++ linux-3.0.4/arch/ia64/kernel/sys_ia64.c 2011-08-23 21:47:55.000000000 -0400
930@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
931 if (REGION_NUMBER(addr) == RGN_HPAGE)
932 addr = 0;
933 #endif
934+
935+#ifdef CONFIG_PAX_RANDMMAP
936+ if (mm->pax_flags & MF_PAX_RANDMMAP)
937+ addr = mm->free_area_cache;
938+ else
939+#endif
940+
941 if (!addr)
942 addr = mm->free_area_cache;
943
944@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
945 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
946 /* At this point: (!vma || addr < vma->vm_end). */
947 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
948- if (start_addr != TASK_UNMAPPED_BASE) {
949+ if (start_addr != mm->mmap_base) {
950 /* Start a new search --- just in case we missed some holes. */
951- addr = TASK_UNMAPPED_BASE;
952+ addr = mm->mmap_base;
953 goto full_search;
954 }
955 return -ENOMEM;
956 }
957- if (!vma || addr + len <= vma->vm_start) {
958+ if (check_heap_stack_gap(vma, addr, len)) {
959 /* Remember the address where we stopped this search: */
960 mm->free_area_cache = addr + len;
961 return addr;
962diff -urNp linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S
963--- linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
964+++ linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
965@@ -199,7 +199,7 @@ SECTIONS {
966 /* Per-cpu data: */
967 . = ALIGN(PERCPU_PAGE_SIZE);
968 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
969- __phys_per_cpu_start = __per_cpu_load;
970+ __phys_per_cpu_start = per_cpu_load;
971 /*
972 * ensure percpu data fits
973 * into percpu page size
974diff -urNp linux-3.0.4/arch/ia64/mm/fault.c linux-3.0.4/arch/ia64/mm/fault.c
975--- linux-3.0.4/arch/ia64/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
976+++ linux-3.0.4/arch/ia64/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
977@@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned
978 return pte_present(pte);
979 }
980
981+#ifdef CONFIG_PAX_PAGEEXEC
982+void pax_report_insns(void *pc, void *sp)
983+{
984+ unsigned long i;
985+
986+ printk(KERN_ERR "PAX: bytes at PC: ");
987+ for (i = 0; i < 8; i++) {
988+ unsigned int c;
989+ if (get_user(c, (unsigned int *)pc+i))
990+ printk(KERN_CONT "???????? ");
991+ else
992+ printk(KERN_CONT "%08x ", c);
993+ }
994+ printk("\n");
995+}
996+#endif
997+
998 void __kprobes
999 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1000 {
1001@@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long addres
1002 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1003 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1004
1005- if ((vma->vm_flags & mask) != mask)
1006+ if ((vma->vm_flags & mask) != mask) {
1007+
1008+#ifdef CONFIG_PAX_PAGEEXEC
1009+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1010+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1011+ goto bad_area;
1012+
1013+ up_read(&mm->mmap_sem);
1014+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1015+ do_group_exit(SIGKILL);
1016+ }
1017+#endif
1018+
1019 goto bad_area;
1020
1021+ }
1022+
1023 /*
1024 * If for any reason at all we couldn't handle the fault, make
1025 * sure we exit gracefully rather than endlessly redo the
1026diff -urNp linux-3.0.4/arch/ia64/mm/hugetlbpage.c linux-3.0.4/arch/ia64/mm/hugetlbpage.c
1027--- linux-3.0.4/arch/ia64/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
1028+++ linux-3.0.4/arch/ia64/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
1029@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
1030 /* At this point: (!vmm || addr < vmm->vm_end). */
1031 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1032 return -ENOMEM;
1033- if (!vmm || (addr + len) <= vmm->vm_start)
1034+ if (check_heap_stack_gap(vmm, addr, len))
1035 return addr;
1036 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1037 }
1038diff -urNp linux-3.0.4/arch/ia64/mm/init.c linux-3.0.4/arch/ia64/mm/init.c
1039--- linux-3.0.4/arch/ia64/mm/init.c 2011-07-21 22:17:23.000000000 -0400
1040+++ linux-3.0.4/arch/ia64/mm/init.c 2011-08-23 21:47:55.000000000 -0400
1041@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1042 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1043 vma->vm_end = vma->vm_start + PAGE_SIZE;
1044 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1045+
1046+#ifdef CONFIG_PAX_PAGEEXEC
1047+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1048+ vma->vm_flags &= ~VM_EXEC;
1049+
1050+#ifdef CONFIG_PAX_MPROTECT
1051+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1052+ vma->vm_flags &= ~VM_MAYEXEC;
1053+#endif
1054+
1055+ }
1056+#endif
1057+
1058 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1059 down_write(&current->mm->mmap_sem);
1060 if (insert_vm_struct(current->mm, vma)) {
1061diff -urNp linux-3.0.4/arch/m32r/lib/usercopy.c linux-3.0.4/arch/m32r/lib/usercopy.c
1062--- linux-3.0.4/arch/m32r/lib/usercopy.c 2011-07-21 22:17:23.000000000 -0400
1063+++ linux-3.0.4/arch/m32r/lib/usercopy.c 2011-08-23 21:47:55.000000000 -0400
1064@@ -14,6 +14,9 @@
1065 unsigned long
1066 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1067 {
1068+ if ((long)n < 0)
1069+ return n;
1070+
1071 prefetch(from);
1072 if (access_ok(VERIFY_WRITE, to, n))
1073 __copy_user(to,from,n);
1074@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1075 unsigned long
1076 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1077 {
1078+ if ((long)n < 0)
1079+ return n;
1080+
1081 prefetchw(to);
1082 if (access_ok(VERIFY_READ, from, n))
1083 __copy_user_zeroing(to,from,n);
1084diff -urNp linux-3.0.4/arch/mips/include/asm/elf.h linux-3.0.4/arch/mips/include/asm/elf.h
1085--- linux-3.0.4/arch/mips/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1086+++ linux-3.0.4/arch/mips/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1087@@ -372,13 +372,16 @@ extern const char *__elf_platform;
1088 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1089 #endif
1090
1091+#ifdef CONFIG_PAX_ASLR
1092+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1093+
1094+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1095+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1096+#endif
1097+
1098 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1099 struct linux_binprm;
1100 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1101 int uses_interp);
1102
1103-struct mm_struct;
1104-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1105-#define arch_randomize_brk arch_randomize_brk
1106-
1107 #endif /* _ASM_ELF_H */
1108diff -urNp linux-3.0.4/arch/mips/include/asm/page.h linux-3.0.4/arch/mips/include/asm/page.h
1109--- linux-3.0.4/arch/mips/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1110+++ linux-3.0.4/arch/mips/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1111@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1112 #ifdef CONFIG_CPU_MIPS32
1113 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1114 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1115- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1116+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1117 #else
1118 typedef struct { unsigned long long pte; } pte_t;
1119 #define pte_val(x) ((x).pte)
1120diff -urNp linux-3.0.4/arch/mips/include/asm/system.h linux-3.0.4/arch/mips/include/asm/system.h
1121--- linux-3.0.4/arch/mips/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1122+++ linux-3.0.4/arch/mips/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1123@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1124 */
1125 #define __ARCH_WANT_UNLOCKED_CTXSW
1126
1127-extern unsigned long arch_align_stack(unsigned long sp);
1128+#define arch_align_stack(x) ((x) & ~0xfUL)
1129
1130 #endif /* _ASM_SYSTEM_H */
1131diff -urNp linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c
1132--- linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c 2011-07-21 22:17:23.000000000 -0400
1133+++ linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c 2011-08-23 21:47:55.000000000 -0400
1134@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1135 #undef ELF_ET_DYN_BASE
1136 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1137
1138+#ifdef CONFIG_PAX_ASLR
1139+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1140+
1141+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1142+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1143+#endif
1144+
1145 #include <asm/processor.h>
1146 #include <linux/module.h>
1147 #include <linux/elfcore.h>
1148diff -urNp linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c
1149--- linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c 2011-07-21 22:17:23.000000000 -0400
1150+++ linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c 2011-08-23 21:47:55.000000000 -0400
1151@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1152 #undef ELF_ET_DYN_BASE
1153 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1154
1155+#ifdef CONFIG_PAX_ASLR
1156+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1157+
1158+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1159+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1160+#endif
1161+
1162 #include <asm/processor.h>
1163
1164 /*
1165diff -urNp linux-3.0.4/arch/mips/kernel/process.c linux-3.0.4/arch/mips/kernel/process.c
1166--- linux-3.0.4/arch/mips/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
1167+++ linux-3.0.4/arch/mips/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
1168@@ -473,15 +473,3 @@ unsigned long get_wchan(struct task_stru
1169 out:
1170 return pc;
1171 }
1172-
1173-/*
1174- * Don't forget that the stack pointer must be aligned on a 8 bytes
1175- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1176- */
1177-unsigned long arch_align_stack(unsigned long sp)
1178-{
1179- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1180- sp -= get_random_int() & ~PAGE_MASK;
1181-
1182- return sp & ALMASK;
1183-}
1184diff -urNp linux-3.0.4/arch/mips/mm/fault.c linux-3.0.4/arch/mips/mm/fault.c
1185--- linux-3.0.4/arch/mips/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1186+++ linux-3.0.4/arch/mips/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1187@@ -28,6 +28,23 @@
1188 #include <asm/highmem.h> /* For VMALLOC_END */
1189 #include <linux/kdebug.h>
1190
1191+#ifdef CONFIG_PAX_PAGEEXEC
1192+void pax_report_insns(void *pc, void *sp)
1193+{
1194+ unsigned long i;
1195+
1196+ printk(KERN_ERR "PAX: bytes at PC: ");
1197+ for (i = 0; i < 5; i++) {
1198+ unsigned int c;
1199+ if (get_user(c, (unsigned int *)pc+i))
1200+ printk(KERN_CONT "???????? ");
1201+ else
1202+ printk(KERN_CONT "%08x ", c);
1203+ }
1204+ printk("\n");
1205+}
1206+#endif
1207+
1208 /*
1209 * This routine handles page faults. It determines the address,
1210 * and the problem, and then passes it off to one of the appropriate
1211diff -urNp linux-3.0.4/arch/mips/mm/mmap.c linux-3.0.4/arch/mips/mm/mmap.c
1212--- linux-3.0.4/arch/mips/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
1213+++ linux-3.0.4/arch/mips/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
1214@@ -48,14 +48,18 @@ unsigned long arch_get_unmapped_area(str
1215 do_color_align = 0;
1216 if (filp || (flags & MAP_SHARED))
1217 do_color_align = 1;
1218+
1219+#ifdef CONFIG_PAX_RANDMMAP
1220+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1221+#endif
1222+
1223 if (addr) {
1224 if (do_color_align)
1225 addr = COLOUR_ALIGN(addr, pgoff);
1226 else
1227 addr = PAGE_ALIGN(addr);
1228 vmm = find_vma(current->mm, addr);
1229- if (TASK_SIZE - len >= addr &&
1230- (!vmm || addr + len <= vmm->vm_start))
1231+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1232 return addr;
1233 }
1234 addr = current->mm->mmap_base;
1235@@ -68,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
1236 /* At this point: (!vmm || addr < vmm->vm_end). */
1237 if (TASK_SIZE - len < addr)
1238 return -ENOMEM;
1239- if (!vmm || addr + len <= vmm->vm_start)
1240+ if (check_heap_stack_gap(vmm, addr, len))
1241 return addr;
1242 addr = vmm->vm_end;
1243 if (do_color_align)
1244@@ -93,30 +97,3 @@ void arch_pick_mmap_layout(struct mm_str
1245 mm->get_unmapped_area = arch_get_unmapped_area;
1246 mm->unmap_area = arch_unmap_area;
1247 }
1248-
1249-static inline unsigned long brk_rnd(void)
1250-{
1251- unsigned long rnd = get_random_int();
1252-
1253- rnd = rnd << PAGE_SHIFT;
1254- /* 8MB for 32bit, 256MB for 64bit */
1255- if (TASK_IS_32BIT_ADDR)
1256- rnd = rnd & 0x7ffffful;
1257- else
1258- rnd = rnd & 0xffffffful;
1259-
1260- return rnd;
1261-}
1262-
1263-unsigned long arch_randomize_brk(struct mm_struct *mm)
1264-{
1265- unsigned long base = mm->brk;
1266- unsigned long ret;
1267-
1268- ret = PAGE_ALIGN(base + brk_rnd());
1269-
1270- if (ret < mm->brk)
1271- return mm->brk;
1272-
1273- return ret;
1274-}
1275diff -urNp linux-3.0.4/arch/parisc/include/asm/elf.h linux-3.0.4/arch/parisc/include/asm/elf.h
1276--- linux-3.0.4/arch/parisc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1277+++ linux-3.0.4/arch/parisc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1278@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
1279
1280 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1281
1282+#ifdef CONFIG_PAX_ASLR
1283+#define PAX_ELF_ET_DYN_BASE 0x10000UL
1284+
1285+#define PAX_DELTA_MMAP_LEN 16
1286+#define PAX_DELTA_STACK_LEN 16
1287+#endif
1288+
1289 /* This yields a mask that user programs can use to figure out what
1290 instruction set this CPU supports. This could be done in user space,
1291 but it's not easy, and we've already done it here. */
1292diff -urNp linux-3.0.4/arch/parisc/include/asm/pgtable.h linux-3.0.4/arch/parisc/include/asm/pgtable.h
1293--- linux-3.0.4/arch/parisc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1294+++ linux-3.0.4/arch/parisc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1295@@ -210,6 +210,17 @@ struct vm_area_struct;
1296 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1297 #define PAGE_COPY PAGE_EXECREAD
1298 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1299+
1300+#ifdef CONFIG_PAX_PAGEEXEC
1301+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1302+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1303+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1304+#else
1305+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1306+# define PAGE_COPY_NOEXEC PAGE_COPY
1307+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1308+#endif
1309+
1310 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1311 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1312 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1313diff -urNp linux-3.0.4/arch/parisc/kernel/module.c linux-3.0.4/arch/parisc/kernel/module.c
1314--- linux-3.0.4/arch/parisc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
1315+++ linux-3.0.4/arch/parisc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
1316@@ -98,16 +98,38 @@
1317
1318 /* three functions to determine where in the module core
1319 * or init pieces the location is */
1320+static inline int in_init_rx(struct module *me, void *loc)
1321+{
1322+ return (loc >= me->module_init_rx &&
1323+ loc < (me->module_init_rx + me->init_size_rx));
1324+}
1325+
1326+static inline int in_init_rw(struct module *me, void *loc)
1327+{
1328+ return (loc >= me->module_init_rw &&
1329+ loc < (me->module_init_rw + me->init_size_rw));
1330+}
1331+
1332 static inline int in_init(struct module *me, void *loc)
1333 {
1334- return (loc >= me->module_init &&
1335- loc <= (me->module_init + me->init_size));
1336+ return in_init_rx(me, loc) || in_init_rw(me, loc);
1337+}
1338+
1339+static inline int in_core_rx(struct module *me, void *loc)
1340+{
1341+ return (loc >= me->module_core_rx &&
1342+ loc < (me->module_core_rx + me->core_size_rx));
1343+}
1344+
1345+static inline int in_core_rw(struct module *me, void *loc)
1346+{
1347+ return (loc >= me->module_core_rw &&
1348+ loc < (me->module_core_rw + me->core_size_rw));
1349 }
1350
1351 static inline int in_core(struct module *me, void *loc)
1352 {
1353- return (loc >= me->module_core &&
1354- loc <= (me->module_core + me->core_size));
1355+ return in_core_rx(me, loc) || in_core_rw(me, loc);
1356 }
1357
1358 static inline int in_local(struct module *me, void *loc)
1359@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_
1360 }
1361
1362 /* align things a bit */
1363- me->core_size = ALIGN(me->core_size, 16);
1364- me->arch.got_offset = me->core_size;
1365- me->core_size += gots * sizeof(struct got_entry);
1366-
1367- me->core_size = ALIGN(me->core_size, 16);
1368- me->arch.fdesc_offset = me->core_size;
1369- me->core_size += fdescs * sizeof(Elf_Fdesc);
1370+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1371+ me->arch.got_offset = me->core_size_rw;
1372+ me->core_size_rw += gots * sizeof(struct got_entry);
1373+
1374+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1375+ me->arch.fdesc_offset = me->core_size_rw;
1376+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1377
1378 me->arch.got_max = gots;
1379 me->arch.fdesc_max = fdescs;
1380@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module
1381
1382 BUG_ON(value == 0);
1383
1384- got = me->module_core + me->arch.got_offset;
1385+ got = me->module_core_rw + me->arch.got_offset;
1386 for (i = 0; got[i].addr; i++)
1387 if (got[i].addr == value)
1388 goto out;
1389@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module
1390 #ifdef CONFIG_64BIT
1391 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1392 {
1393- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1394+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1395
1396 if (!value) {
1397 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1398@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module
1399
1400 /* Create new one */
1401 fdesc->addr = value;
1402- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1403+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1404 return (Elf_Addr)fdesc;
1405 }
1406 #endif /* CONFIG_64BIT */
1407@@ -857,7 +879,7 @@ register_unwind_table(struct module *me,
1408
1409 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1410 end = table + sechdrs[me->arch.unwind_section].sh_size;
1411- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1412+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1413
1414 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1415 me->arch.unwind_section, table, end, gp);
1416diff -urNp linux-3.0.4/arch/parisc/kernel/sys_parisc.c linux-3.0.4/arch/parisc/kernel/sys_parisc.c
1417--- linux-3.0.4/arch/parisc/kernel/sys_parisc.c 2011-07-21 22:17:23.000000000 -0400
1418+++ linux-3.0.4/arch/parisc/kernel/sys_parisc.c 2011-08-23 21:47:55.000000000 -0400
1419@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1420 /* At this point: (!vma || addr < vma->vm_end). */
1421 if (TASK_SIZE - len < addr)
1422 return -ENOMEM;
1423- if (!vma || addr + len <= vma->vm_start)
1424+ if (check_heap_stack_gap(vma, addr, len))
1425 return addr;
1426 addr = vma->vm_end;
1427 }
1428@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1429 /* At this point: (!vma || addr < vma->vm_end). */
1430 if (TASK_SIZE - len < addr)
1431 return -ENOMEM;
1432- if (!vma || addr + len <= vma->vm_start)
1433+ if (check_heap_stack_gap(vma, addr, len))
1434 return addr;
1435 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1436 if (addr < vma->vm_end) /* handle wraparound */
1437@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1438 if (flags & MAP_FIXED)
1439 return addr;
1440 if (!addr)
1441- addr = TASK_UNMAPPED_BASE;
1442+ addr = current->mm->mmap_base;
1443
1444 if (filp) {
1445 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1446diff -urNp linux-3.0.4/arch/parisc/kernel/traps.c linux-3.0.4/arch/parisc/kernel/traps.c
1447--- linux-3.0.4/arch/parisc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
1448+++ linux-3.0.4/arch/parisc/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
1449@@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1450
1451 down_read(&current->mm->mmap_sem);
1452 vma = find_vma(current->mm,regs->iaoq[0]);
1453- if (vma && (regs->iaoq[0] >= vma->vm_start)
1454- && (vma->vm_flags & VM_EXEC)) {
1455-
1456+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1457 fault_address = regs->iaoq[0];
1458 fault_space = regs->iasq[0];
1459
1460diff -urNp linux-3.0.4/arch/parisc/mm/fault.c linux-3.0.4/arch/parisc/mm/fault.c
1461--- linux-3.0.4/arch/parisc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1462+++ linux-3.0.4/arch/parisc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1463@@ -15,6 +15,7 @@
1464 #include <linux/sched.h>
1465 #include <linux/interrupt.h>
1466 #include <linux/module.h>
1467+#include <linux/unistd.h>
1468
1469 #include <asm/uaccess.h>
1470 #include <asm/traps.h>
1471@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1472 static unsigned long
1473 parisc_acctyp(unsigned long code, unsigned int inst)
1474 {
1475- if (code == 6 || code == 16)
1476+ if (code == 6 || code == 7 || code == 16)
1477 return VM_EXEC;
1478
1479 switch (inst & 0xf0000000) {
1480@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1481 }
1482 #endif
1483
1484+#ifdef CONFIG_PAX_PAGEEXEC
1485+/*
1486+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1487+ *
1488+ * returns 1 when task should be killed
1489+ * 2 when rt_sigreturn trampoline was detected
1490+ * 3 when unpatched PLT trampoline was detected
1491+ */
1492+static int pax_handle_fetch_fault(struct pt_regs *regs)
1493+{
1494+
1495+#ifdef CONFIG_PAX_EMUPLT
1496+ int err;
1497+
1498+ do { /* PaX: unpatched PLT emulation */
1499+ unsigned int bl, depwi;
1500+
1501+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1502+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1503+
1504+ if (err)
1505+ break;
1506+
1507+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1508+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1509+
1510+ err = get_user(ldw, (unsigned int *)addr);
1511+ err |= get_user(bv, (unsigned int *)(addr+4));
1512+ err |= get_user(ldw2, (unsigned int *)(addr+8));
1513+
1514+ if (err)
1515+ break;
1516+
1517+ if (ldw == 0x0E801096U &&
1518+ bv == 0xEAC0C000U &&
1519+ ldw2 == 0x0E881095U)
1520+ {
1521+ unsigned int resolver, map;
1522+
1523+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1524+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1525+ if (err)
1526+ break;
1527+
1528+ regs->gr[20] = instruction_pointer(regs)+8;
1529+ regs->gr[21] = map;
1530+ regs->gr[22] = resolver;
1531+ regs->iaoq[0] = resolver | 3UL;
1532+ regs->iaoq[1] = regs->iaoq[0] + 4;
1533+ return 3;
1534+ }
1535+ }
1536+ } while (0);
1537+#endif
1538+
1539+#ifdef CONFIG_PAX_EMUTRAMP
1540+
1541+#ifndef CONFIG_PAX_EMUSIGRT
1542+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1543+ return 1;
1544+#endif
1545+
1546+ do { /* PaX: rt_sigreturn emulation */
1547+ unsigned int ldi1, ldi2, bel, nop;
1548+
1549+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1550+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1551+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1552+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1553+
1554+ if (err)
1555+ break;
1556+
1557+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1558+ ldi2 == 0x3414015AU &&
1559+ bel == 0xE4008200U &&
1560+ nop == 0x08000240U)
1561+ {
1562+ regs->gr[25] = (ldi1 & 2) >> 1;
1563+ regs->gr[20] = __NR_rt_sigreturn;
1564+ regs->gr[31] = regs->iaoq[1] + 16;
1565+ regs->sr[0] = regs->iasq[1];
1566+ regs->iaoq[0] = 0x100UL;
1567+ regs->iaoq[1] = regs->iaoq[0] + 4;
1568+ regs->iasq[0] = regs->sr[2];
1569+ regs->iasq[1] = regs->sr[2];
1570+ return 2;
1571+ }
1572+ } while (0);
1573+#endif
1574+
1575+ return 1;
1576+}
1577+
1578+void pax_report_insns(void *pc, void *sp)
1579+{
1580+ unsigned long i;
1581+
1582+ printk(KERN_ERR "PAX: bytes at PC: ");
1583+ for (i = 0; i < 5; i++) {
1584+ unsigned int c;
1585+ if (get_user(c, (unsigned int *)pc+i))
1586+ printk(KERN_CONT "???????? ");
1587+ else
1588+ printk(KERN_CONT "%08x ", c);
1589+ }
1590+ printk("\n");
1591+}
1592+#endif
1593+
1594 int fixup_exception(struct pt_regs *regs)
1595 {
1596 const struct exception_table_entry *fix;
1597@@ -192,8 +303,33 @@ good_area:
1598
1599 acc_type = parisc_acctyp(code,regs->iir);
1600
1601- if ((vma->vm_flags & acc_type) != acc_type)
1602+ if ((vma->vm_flags & acc_type) != acc_type) {
1603+
1604+#ifdef CONFIG_PAX_PAGEEXEC
1605+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1606+ (address & ~3UL) == instruction_pointer(regs))
1607+ {
1608+ up_read(&mm->mmap_sem);
1609+ switch (pax_handle_fetch_fault(regs)) {
1610+
1611+#ifdef CONFIG_PAX_EMUPLT
1612+ case 3:
1613+ return;
1614+#endif
1615+
1616+#ifdef CONFIG_PAX_EMUTRAMP
1617+ case 2:
1618+ return;
1619+#endif
1620+
1621+ }
1622+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1623+ do_group_exit(SIGKILL);
1624+ }
1625+#endif
1626+
1627 goto bad_area;
1628+ }
1629
1630 /*
1631 * If for any reason at all we couldn't handle the fault, make
1632diff -urNp linux-3.0.4/arch/powerpc/include/asm/elf.h linux-3.0.4/arch/powerpc/include/asm/elf.h
1633--- linux-3.0.4/arch/powerpc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1634+++ linux-3.0.4/arch/powerpc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1635@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1636 the loader. We need to make sure that it is out of the way of the program
1637 that it will "exec", and that there is sufficient room for the brk. */
1638
1639-extern unsigned long randomize_et_dyn(unsigned long base);
1640-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1641+#define ELF_ET_DYN_BASE (0x20000000)
1642+
1643+#ifdef CONFIG_PAX_ASLR
1644+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1645+
1646+#ifdef __powerpc64__
1647+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
1648+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
1649+#else
1650+#define PAX_DELTA_MMAP_LEN 15
1651+#define PAX_DELTA_STACK_LEN 15
1652+#endif
1653+#endif
1654
1655 /*
1656 * Our registers are always unsigned longs, whether we're a 32 bit
1657@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
1658 (0x7ff >> (PAGE_SHIFT - 12)) : \
1659 (0x3ffff >> (PAGE_SHIFT - 12)))
1660
1661-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1662-#define arch_randomize_brk arch_randomize_brk
1663-
1664 #endif /* __KERNEL__ */
1665
1666 /*
1667diff -urNp linux-3.0.4/arch/powerpc/include/asm/kmap_types.h linux-3.0.4/arch/powerpc/include/asm/kmap_types.h
1668--- linux-3.0.4/arch/powerpc/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
1669+++ linux-3.0.4/arch/powerpc/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
1670@@ -27,6 +27,7 @@ enum km_type {
1671 KM_PPC_SYNC_PAGE,
1672 KM_PPC_SYNC_ICACHE,
1673 KM_KDB,
1674+ KM_CLEARPAGE,
1675 KM_TYPE_NR
1676 };
1677
1678diff -urNp linux-3.0.4/arch/powerpc/include/asm/mman.h linux-3.0.4/arch/powerpc/include/asm/mman.h
1679--- linux-3.0.4/arch/powerpc/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
1680+++ linux-3.0.4/arch/powerpc/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
1681@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm
1682 }
1683 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
1684
1685-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
1686+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
1687 {
1688 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
1689 }
1690diff -urNp linux-3.0.4/arch/powerpc/include/asm/page_64.h linux-3.0.4/arch/powerpc/include/asm/page_64.h
1691--- linux-3.0.4/arch/powerpc/include/asm/page_64.h 2011-07-21 22:17:23.000000000 -0400
1692+++ linux-3.0.4/arch/powerpc/include/asm/page_64.h 2011-08-23 21:47:55.000000000 -0400
1693@@ -155,15 +155,18 @@ do { \
1694 * stack by default, so in the absence of a PT_GNU_STACK program header
1695 * we turn execute permission off.
1696 */
1697-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1698- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1699+#define VM_STACK_DEFAULT_FLAGS32 \
1700+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1701+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1702
1703 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1704 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1705
1706+#ifndef CONFIG_PAX_PAGEEXEC
1707 #define VM_STACK_DEFAULT_FLAGS \
1708 (is_32bit_task() ? \
1709 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
1710+#endif
1711
1712 #include <asm-generic/getorder.h>
1713
1714diff -urNp linux-3.0.4/arch/powerpc/include/asm/page.h linux-3.0.4/arch/powerpc/include/asm/page.h
1715--- linux-3.0.4/arch/powerpc/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1716+++ linux-3.0.4/arch/powerpc/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1717@@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
1718 * and needs to be executable. This means the whole heap ends
1719 * up being executable.
1720 */
1721-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1722- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1723+#define VM_DATA_DEFAULT_FLAGS32 \
1724+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1725+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1726
1727 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1728 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1729@@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
1730 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
1731 #endif
1732
1733+#define ktla_ktva(addr) (addr)
1734+#define ktva_ktla(addr) (addr)
1735+
1736 #ifndef __ASSEMBLY__
1737
1738 #undef STRICT_MM_TYPECHECKS
1739diff -urNp linux-3.0.4/arch/powerpc/include/asm/pgtable.h linux-3.0.4/arch/powerpc/include/asm/pgtable.h
1740--- linux-3.0.4/arch/powerpc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1741+++ linux-3.0.4/arch/powerpc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1742@@ -2,6 +2,7 @@
1743 #define _ASM_POWERPC_PGTABLE_H
1744 #ifdef __KERNEL__
1745
1746+#include <linux/const.h>
1747 #ifndef __ASSEMBLY__
1748 #include <asm/processor.h> /* For TASK_SIZE */
1749 #include <asm/mmu.h>
1750diff -urNp linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h
1751--- linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h 2011-07-21 22:17:23.000000000 -0400
1752+++ linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h 2011-08-23 21:47:55.000000000 -0400
1753@@ -21,6 +21,7 @@
1754 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
1755 #define _PAGE_USER 0x004 /* usermode access allowed */
1756 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
1757+#define _PAGE_EXEC _PAGE_GUARDED
1758 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
1759 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
1760 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
1761diff -urNp linux-3.0.4/arch/powerpc/include/asm/reg.h linux-3.0.4/arch/powerpc/include/asm/reg.h
1762--- linux-3.0.4/arch/powerpc/include/asm/reg.h 2011-07-21 22:17:23.000000000 -0400
1763+++ linux-3.0.4/arch/powerpc/include/asm/reg.h 2011-08-23 21:47:55.000000000 -0400
1764@@ -209,6 +209,7 @@
1765 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
1766 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
1767 #define DSISR_NOHPTE 0x40000000 /* no translation found */
1768+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
1769 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
1770 #define DSISR_ISSTORE 0x02000000 /* access was a store */
1771 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
1772diff -urNp linux-3.0.4/arch/powerpc/include/asm/system.h linux-3.0.4/arch/powerpc/include/asm/system.h
1773--- linux-3.0.4/arch/powerpc/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1774+++ linux-3.0.4/arch/powerpc/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1775@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
1776 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
1777 #endif
1778
1779-extern unsigned long arch_align_stack(unsigned long sp);
1780+#define arch_align_stack(x) ((x) & ~0xfUL)
1781
1782 /* Used in very early kernel initialization. */
1783 extern unsigned long reloc_offset(void);
1784diff -urNp linux-3.0.4/arch/powerpc/include/asm/uaccess.h linux-3.0.4/arch/powerpc/include/asm/uaccess.h
1785--- linux-3.0.4/arch/powerpc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
1786+++ linux-3.0.4/arch/powerpc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
1787@@ -13,6 +13,8 @@
1788 #define VERIFY_READ 0
1789 #define VERIFY_WRITE 1
1790
1791+extern void check_object_size(const void *ptr, unsigned long n, bool to);
1792+
1793 /*
1794 * The fs value determines whether argument validity checking should be
1795 * performed or not. If get_fs() == USER_DS, checking is performed, with
1796@@ -327,52 +329,6 @@ do { \
1797 extern unsigned long __copy_tofrom_user(void __user *to,
1798 const void __user *from, unsigned long size);
1799
1800-#ifndef __powerpc64__
1801-
1802-static inline unsigned long copy_from_user(void *to,
1803- const void __user *from, unsigned long n)
1804-{
1805- unsigned long over;
1806-
1807- if (access_ok(VERIFY_READ, from, n))
1808- return __copy_tofrom_user((__force void __user *)to, from, n);
1809- if ((unsigned long)from < TASK_SIZE) {
1810- over = (unsigned long)from + n - TASK_SIZE;
1811- return __copy_tofrom_user((__force void __user *)to, from,
1812- n - over) + over;
1813- }
1814- return n;
1815-}
1816-
1817-static inline unsigned long copy_to_user(void __user *to,
1818- const void *from, unsigned long n)
1819-{
1820- unsigned long over;
1821-
1822- if (access_ok(VERIFY_WRITE, to, n))
1823- return __copy_tofrom_user(to, (__force void __user *)from, n);
1824- if ((unsigned long)to < TASK_SIZE) {
1825- over = (unsigned long)to + n - TASK_SIZE;
1826- return __copy_tofrom_user(to, (__force void __user *)from,
1827- n - over) + over;
1828- }
1829- return n;
1830-}
1831-
1832-#else /* __powerpc64__ */
1833-
1834-#define __copy_in_user(to, from, size) \
1835- __copy_tofrom_user((to), (from), (size))
1836-
1837-extern unsigned long copy_from_user(void *to, const void __user *from,
1838- unsigned long n);
1839-extern unsigned long copy_to_user(void __user *to, const void *from,
1840- unsigned long n);
1841-extern unsigned long copy_in_user(void __user *to, const void __user *from,
1842- unsigned long n);
1843-
1844-#endif /* __powerpc64__ */
1845-
1846 static inline unsigned long __copy_from_user_inatomic(void *to,
1847 const void __user *from, unsigned long n)
1848 {
1849@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
1850 if (ret == 0)
1851 return 0;
1852 }
1853+
1854+ if (!__builtin_constant_p(n))
1855+ check_object_size(to, n, false);
1856+
1857 return __copy_tofrom_user((__force void __user *)to, from, n);
1858 }
1859
1860@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
1861 if (ret == 0)
1862 return 0;
1863 }
1864+
1865+ if (!__builtin_constant_p(n))
1866+ check_object_size(from, n, true);
1867+
1868 return __copy_tofrom_user(to, (__force const void __user *)from, n);
1869 }
1870
1871@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
1872 return __copy_to_user_inatomic(to, from, size);
1873 }
1874
1875+#ifndef __powerpc64__
1876+
1877+static inline unsigned long __must_check copy_from_user(void *to,
1878+ const void __user *from, unsigned long n)
1879+{
1880+ unsigned long over;
1881+
1882+ if ((long)n < 0)
1883+ return n;
1884+
1885+ if (access_ok(VERIFY_READ, from, n)) {
1886+ if (!__builtin_constant_p(n))
1887+ check_object_size(to, n, false);
1888+ return __copy_tofrom_user((__force void __user *)to, from, n);
1889+ }
1890+ if ((unsigned long)from < TASK_SIZE) {
1891+ over = (unsigned long)from + n - TASK_SIZE;
1892+ if (!__builtin_constant_p(n - over))
1893+ check_object_size(to, n - over, false);
1894+ return __copy_tofrom_user((__force void __user *)to, from,
1895+ n - over) + over;
1896+ }
1897+ return n;
1898+}
1899+
1900+static inline unsigned long __must_check copy_to_user(void __user *to,
1901+ const void *from, unsigned long n)
1902+{
1903+ unsigned long over;
1904+
1905+ if ((long)n < 0)
1906+ return n;
1907+
1908+ if (access_ok(VERIFY_WRITE, to, n)) {
1909+ if (!__builtin_constant_p(n))
1910+ check_object_size(from, n, true);
1911+ return __copy_tofrom_user(to, (__force void __user *)from, n);
1912+ }
1913+ if ((unsigned long)to < TASK_SIZE) {
1914+ over = (unsigned long)to + n - TASK_SIZE;
1915+ if (!__builtin_constant_p(n))
1916+ check_object_size(from, n - over, true);
1917+ return __copy_tofrom_user(to, (__force void __user *)from,
1918+ n - over) + over;
1919+ }
1920+ return n;
1921+}
1922+
1923+#else /* __powerpc64__ */
1924+
1925+#define __copy_in_user(to, from, size) \
1926+ __copy_tofrom_user((to), (from), (size))
1927+
1928+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1929+{
1930+ if ((long)n < 0 || n > INT_MAX)
1931+ return n;
1932+
1933+ if (!__builtin_constant_p(n))
1934+ check_object_size(to, n, false);
1935+
1936+ if (likely(access_ok(VERIFY_READ, from, n)))
1937+ n = __copy_from_user(to, from, n);
1938+ else
1939+ memset(to, 0, n);
1940+ return n;
1941+}
1942+
1943+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1944+{
1945+ if ((long)n < 0 || n > INT_MAX)
1946+ return n;
1947+
1948+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
1949+ if (!__builtin_constant_p(n))
1950+ check_object_size(from, n, true);
1951+ n = __copy_to_user(to, from, n);
1952+ }
1953+ return n;
1954+}
1955+
1956+extern unsigned long copy_in_user(void __user *to, const void __user *from,
1957+ unsigned long n);
1958+
1959+#endif /* __powerpc64__ */
1960+
1961 extern unsigned long __clear_user(void __user *addr, unsigned long size);
1962
1963 static inline unsigned long clear_user(void __user *addr, unsigned long size)
1964diff -urNp linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S
1965--- linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S 2011-07-21 22:17:23.000000000 -0400
1966+++ linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S 2011-08-23 21:47:55.000000000 -0400
1967@@ -567,6 +567,7 @@ storage_fault_common:
1968 std r14,_DAR(r1)
1969 std r15,_DSISR(r1)
1970 addi r3,r1,STACK_FRAME_OVERHEAD
1971+ bl .save_nvgprs
1972 mr r4,r14
1973 mr r5,r15
1974 ld r14,PACA_EXGEN+EX_R14(r13)
1975@@ -576,8 +577,7 @@ storage_fault_common:
1976 cmpdi r3,0
1977 bne- 1f
1978 b .ret_from_except_lite
1979-1: bl .save_nvgprs
1980- mr r5,r3
1981+1: mr r5,r3
1982 addi r3,r1,STACK_FRAME_OVERHEAD
1983 ld r4,_DAR(r1)
1984 bl .bad_page_fault
1985diff -urNp linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S
1986--- linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S 2011-07-21 22:17:23.000000000 -0400
1987+++ linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S 2011-08-23 21:47:55.000000000 -0400
1988@@ -956,10 +956,10 @@ handle_page_fault:
1989 11: ld r4,_DAR(r1)
1990 ld r5,_DSISR(r1)
1991 addi r3,r1,STACK_FRAME_OVERHEAD
1992+ bl .save_nvgprs
1993 bl .do_page_fault
1994 cmpdi r3,0
1995 beq+ 13f
1996- bl .save_nvgprs
1997 mr r5,r3
1998 addi r3,r1,STACK_FRAME_OVERHEAD
1999 lwz r4,_DAR(r1)
2000diff -urNp linux-3.0.4/arch/powerpc/kernel/module_32.c linux-3.0.4/arch/powerpc/kernel/module_32.c
2001--- linux-3.0.4/arch/powerpc/kernel/module_32.c 2011-07-21 22:17:23.000000000 -0400
2002+++ linux-3.0.4/arch/powerpc/kernel/module_32.c 2011-08-23 21:47:55.000000000 -0400
2003@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2004 me->arch.core_plt_section = i;
2005 }
2006 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2007- printk("Module doesn't contain .plt or .init.plt sections.\n");
2008+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2009 return -ENOEXEC;
2010 }
2011
2012@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2013
2014 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2015 /* Init, or core PLT? */
2016- if (location >= mod->module_core
2017- && location < mod->module_core + mod->core_size)
2018+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2019+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2020 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2021- else
2022+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2023+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2024 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2025+ else {
2026+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2027+ return ~0UL;
2028+ }
2029
2030 /* Find this entry, or if that fails, the next avail. entry */
2031 while (entry->jump[0]) {
2032diff -urNp linux-3.0.4/arch/powerpc/kernel/module.c linux-3.0.4/arch/powerpc/kernel/module.c
2033--- linux-3.0.4/arch/powerpc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2034+++ linux-3.0.4/arch/powerpc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2035@@ -31,11 +31,24 @@
2036
2037 LIST_HEAD(module_bug_list);
2038
2039+#ifdef CONFIG_PAX_KERNEXEC
2040 void *module_alloc(unsigned long size)
2041 {
2042 if (size == 0)
2043 return NULL;
2044
2045+ return vmalloc(size);
2046+}
2047+
2048+void *module_alloc_exec(unsigned long size)
2049+#else
2050+void *module_alloc(unsigned long size)
2051+#endif
2052+
2053+{
2054+ if (size == 0)
2055+ return NULL;
2056+
2057 return vmalloc_exec(size);
2058 }
2059
2060@@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2061 vfree(module_region);
2062 }
2063
2064+#ifdef CONFIG_PAX_KERNEXEC
2065+void module_free_exec(struct module *mod, void *module_region)
2066+{
2067+ module_free(mod, module_region);
2068+}
2069+#endif
2070+
2071 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2072 const Elf_Shdr *sechdrs,
2073 const char *name)
2074diff -urNp linux-3.0.4/arch/powerpc/kernel/process.c linux-3.0.4/arch/powerpc/kernel/process.c
2075--- linux-3.0.4/arch/powerpc/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2076+++ linux-3.0.4/arch/powerpc/kernel/process.c 2011-08-23 21:48:14.000000000 -0400
2077@@ -676,8 +676,8 @@ void show_regs(struct pt_regs * regs)
2078 * Lookup NIP late so we have the best change of getting the
2079 * above info out without failing
2080 */
2081- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2082- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2083+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2084+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2085 #endif
2086 show_stack(current, (unsigned long *) regs->gpr[1]);
2087 if (!user_mode(regs))
2088@@ -1183,10 +1183,10 @@ void show_stack(struct task_struct *tsk,
2089 newsp = stack[0];
2090 ip = stack[STACK_FRAME_LR_SAVE];
2091 if (!firstframe || ip != lr) {
2092- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2093+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2094 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2095 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2096- printk(" (%pS)",
2097+ printk(" (%pA)",
2098 (void *)current->ret_stack[curr_frame].ret);
2099 curr_frame--;
2100 }
2101@@ -1206,7 +1206,7 @@ void show_stack(struct task_struct *tsk,
2102 struct pt_regs *regs = (struct pt_regs *)
2103 (sp + STACK_FRAME_OVERHEAD);
2104 lr = regs->link;
2105- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2106+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2107 regs->trap, (void *)regs->nip, (void *)lr);
2108 firstframe = 1;
2109 }
2110@@ -1281,58 +1281,3 @@ void thread_info_cache_init(void)
2111 }
2112
2113 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2114-
2115-unsigned long arch_align_stack(unsigned long sp)
2116-{
2117- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2118- sp -= get_random_int() & ~PAGE_MASK;
2119- return sp & ~0xf;
2120-}
2121-
2122-static inline unsigned long brk_rnd(void)
2123-{
2124- unsigned long rnd = 0;
2125-
2126- /* 8MB for 32bit, 1GB for 64bit */
2127- if (is_32bit_task())
2128- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2129- else
2130- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2131-
2132- return rnd << PAGE_SHIFT;
2133-}
2134-
2135-unsigned long arch_randomize_brk(struct mm_struct *mm)
2136-{
2137- unsigned long base = mm->brk;
2138- unsigned long ret;
2139-
2140-#ifdef CONFIG_PPC_STD_MMU_64
2141- /*
2142- * If we are using 1TB segments and we are allowed to randomise
2143- * the heap, we can put it above 1TB so it is backed by a 1TB
2144- * segment. Otherwise the heap will be in the bottom 1TB
2145- * which always uses 256MB segments and this may result in a
2146- * performance penalty.
2147- */
2148- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2149- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2150-#endif
2151-
2152- ret = PAGE_ALIGN(base + brk_rnd());
2153-
2154- if (ret < mm->brk)
2155- return mm->brk;
2156-
2157- return ret;
2158-}
2159-
2160-unsigned long randomize_et_dyn(unsigned long base)
2161-{
2162- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2163-
2164- if (ret < base)
2165- return base;
2166-
2167- return ret;
2168-}
2169diff -urNp linux-3.0.4/arch/powerpc/kernel/signal_32.c linux-3.0.4/arch/powerpc/kernel/signal_32.c
2170--- linux-3.0.4/arch/powerpc/kernel/signal_32.c 2011-07-21 22:17:23.000000000 -0400
2171+++ linux-3.0.4/arch/powerpc/kernel/signal_32.c 2011-08-23 21:47:55.000000000 -0400
2172@@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig
2173 /* Save user registers on the stack */
2174 frame = &rt_sf->uc.uc_mcontext;
2175 addr = frame;
2176- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2177+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2178 if (save_user_regs(regs, frame, 0, 1))
2179 goto badframe;
2180 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2181diff -urNp linux-3.0.4/arch/powerpc/kernel/signal_64.c linux-3.0.4/arch/powerpc/kernel/signal_64.c
2182--- linux-3.0.4/arch/powerpc/kernel/signal_64.c 2011-07-21 22:17:23.000000000 -0400
2183+++ linux-3.0.4/arch/powerpc/kernel/signal_64.c 2011-08-23 21:47:55.000000000 -0400
2184@@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct
2185 current->thread.fpscr.val = 0;
2186
2187 /* Set up to return from userspace. */
2188- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2189+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2190 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2191 } else {
2192 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2193diff -urNp linux-3.0.4/arch/powerpc/kernel/traps.c linux-3.0.4/arch/powerpc/kernel/traps.c
2194--- linux-3.0.4/arch/powerpc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
2195+++ linux-3.0.4/arch/powerpc/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
2196@@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2197 static inline void pmac_backlight_unblank(void) { }
2198 #endif
2199
2200+extern void gr_handle_kernel_exploit(void);
2201+
2202 int die(const char *str, struct pt_regs *regs, long err)
2203 {
2204 static struct {
2205@@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs
2206 if (panic_on_oops)
2207 panic("Fatal exception");
2208
2209+ gr_handle_kernel_exploit();
2210+
2211 oops_exit();
2212 do_exit(err);
2213
2214diff -urNp linux-3.0.4/arch/powerpc/kernel/vdso.c linux-3.0.4/arch/powerpc/kernel/vdso.c
2215--- linux-3.0.4/arch/powerpc/kernel/vdso.c 2011-07-21 22:17:23.000000000 -0400
2216+++ linux-3.0.4/arch/powerpc/kernel/vdso.c 2011-08-23 21:47:55.000000000 -0400
2217@@ -36,6 +36,7 @@
2218 #include <asm/firmware.h>
2219 #include <asm/vdso.h>
2220 #include <asm/vdso_datapage.h>
2221+#include <asm/mman.h>
2222
2223 #include "setup.h"
2224
2225@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2226 vdso_base = VDSO32_MBASE;
2227 #endif
2228
2229- current->mm->context.vdso_base = 0;
2230+ current->mm->context.vdso_base = ~0UL;
2231
2232 /* vDSO has a problem and was disabled, just don't "enable" it for the
2233 * process
2234@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2235 vdso_base = get_unmapped_area(NULL, vdso_base,
2236 (vdso_pages << PAGE_SHIFT) +
2237 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2238- 0, 0);
2239+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2240 if (IS_ERR_VALUE(vdso_base)) {
2241 rc = vdso_base;
2242 goto fail_mmapsem;
2243diff -urNp linux-3.0.4/arch/powerpc/lib/usercopy_64.c linux-3.0.4/arch/powerpc/lib/usercopy_64.c
2244--- linux-3.0.4/arch/powerpc/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
2245+++ linux-3.0.4/arch/powerpc/lib/usercopy_64.c 2011-08-23 21:47:55.000000000 -0400
2246@@ -9,22 +9,6 @@
2247 #include <linux/module.h>
2248 #include <asm/uaccess.h>
2249
2250-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2251-{
2252- if (likely(access_ok(VERIFY_READ, from, n)))
2253- n = __copy_from_user(to, from, n);
2254- else
2255- memset(to, 0, n);
2256- return n;
2257-}
2258-
2259-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2260-{
2261- if (likely(access_ok(VERIFY_WRITE, to, n)))
2262- n = __copy_to_user(to, from, n);
2263- return n;
2264-}
2265-
2266 unsigned long copy_in_user(void __user *to, const void __user *from,
2267 unsigned long n)
2268 {
2269@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2270 return n;
2271 }
2272
2273-EXPORT_SYMBOL(copy_from_user);
2274-EXPORT_SYMBOL(copy_to_user);
2275 EXPORT_SYMBOL(copy_in_user);
2276
2277diff -urNp linux-3.0.4/arch/powerpc/mm/fault.c linux-3.0.4/arch/powerpc/mm/fault.c
2278--- linux-3.0.4/arch/powerpc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
2279+++ linux-3.0.4/arch/powerpc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
2280@@ -32,6 +32,10 @@
2281 #include <linux/perf_event.h>
2282 #include <linux/magic.h>
2283 #include <linux/ratelimit.h>
2284+#include <linux/slab.h>
2285+#include <linux/pagemap.h>
2286+#include <linux/compiler.h>
2287+#include <linux/unistd.h>
2288
2289 #include <asm/firmware.h>
2290 #include <asm/page.h>
2291@@ -43,6 +47,7 @@
2292 #include <asm/tlbflush.h>
2293 #include <asm/siginfo.h>
2294 #include <mm/mmu_decl.h>
2295+#include <asm/ptrace.h>
2296
2297 #ifdef CONFIG_KPROBES
2298 static inline int notify_page_fault(struct pt_regs *regs)
2299@@ -66,6 +71,33 @@ static inline int notify_page_fault(stru
2300 }
2301 #endif
2302
2303+#ifdef CONFIG_PAX_PAGEEXEC
2304+/*
2305+ * PaX: decide what to do with offenders (regs->nip = fault address)
2306+ *
2307+ * returns 1 when task should be killed
2308+ */
2309+static int pax_handle_fetch_fault(struct pt_regs *regs)
2310+{
2311+ return 1;
2312+}
2313+
2314+void pax_report_insns(void *pc, void *sp)
2315+{
2316+ unsigned long i;
2317+
2318+ printk(KERN_ERR "PAX: bytes at PC: ");
2319+ for (i = 0; i < 5; i++) {
2320+ unsigned int c;
2321+ if (get_user(c, (unsigned int __user *)pc+i))
2322+ printk(KERN_CONT "???????? ");
2323+ else
2324+ printk(KERN_CONT "%08x ", c);
2325+ }
2326+ printk("\n");
2327+}
2328+#endif
2329+
2330 /*
2331 * Check whether the instruction at regs->nip is a store using
2332 * an update addressing form which will update r1.
2333@@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_re
2334 * indicate errors in DSISR but can validly be set in SRR1.
2335 */
2336 if (trap == 0x400)
2337- error_code &= 0x48200000;
2338+ error_code &= 0x58200000;
2339 else
2340 is_write = error_code & DSISR_ISSTORE;
2341 #else
2342@@ -259,7 +291,7 @@ good_area:
2343 * "undefined". Of those that can be set, this is the only
2344 * one which seems bad.
2345 */
2346- if (error_code & 0x10000000)
2347+ if (error_code & DSISR_GUARDED)
2348 /* Guarded storage error. */
2349 goto bad_area;
2350 #endif /* CONFIG_8xx */
2351@@ -274,7 +306,7 @@ good_area:
2352 * processors use the same I/D cache coherency mechanism
2353 * as embedded.
2354 */
2355- if (error_code & DSISR_PROTFAULT)
2356+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2357 goto bad_area;
2358 #endif /* CONFIG_PPC_STD_MMU */
2359
2360@@ -343,6 +375,23 @@ bad_area:
2361 bad_area_nosemaphore:
2362 /* User mode accesses cause a SIGSEGV */
2363 if (user_mode(regs)) {
2364+
2365+#ifdef CONFIG_PAX_PAGEEXEC
2366+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2367+#ifdef CONFIG_PPC_STD_MMU
2368+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2369+#else
2370+ if (is_exec && regs->nip == address) {
2371+#endif
2372+ switch (pax_handle_fetch_fault(regs)) {
2373+ }
2374+
2375+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2376+ do_group_exit(SIGKILL);
2377+ }
2378+ }
2379+#endif
2380+
2381 _exception(SIGSEGV, regs, code, address);
2382 return 0;
2383 }
2384diff -urNp linux-3.0.4/arch/powerpc/mm/mmap_64.c linux-3.0.4/arch/powerpc/mm/mmap_64.c
2385--- linux-3.0.4/arch/powerpc/mm/mmap_64.c 2011-07-21 22:17:23.000000000 -0400
2386+++ linux-3.0.4/arch/powerpc/mm/mmap_64.c 2011-08-23 21:47:55.000000000 -0400
2387@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2388 */
2389 if (mmap_is_legacy()) {
2390 mm->mmap_base = TASK_UNMAPPED_BASE;
2391+
2392+#ifdef CONFIG_PAX_RANDMMAP
2393+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2394+ mm->mmap_base += mm->delta_mmap;
2395+#endif
2396+
2397 mm->get_unmapped_area = arch_get_unmapped_area;
2398 mm->unmap_area = arch_unmap_area;
2399 } else {
2400 mm->mmap_base = mmap_base();
2401+
2402+#ifdef CONFIG_PAX_RANDMMAP
2403+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2404+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2405+#endif
2406+
2407 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2408 mm->unmap_area = arch_unmap_area_topdown;
2409 }
2410diff -urNp linux-3.0.4/arch/powerpc/mm/slice.c linux-3.0.4/arch/powerpc/mm/slice.c
2411--- linux-3.0.4/arch/powerpc/mm/slice.c 2011-07-21 22:17:23.000000000 -0400
2412+++ linux-3.0.4/arch/powerpc/mm/slice.c 2011-08-23 21:47:55.000000000 -0400
2413@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2414 if ((mm->task_size - len) < addr)
2415 return 0;
2416 vma = find_vma(mm, addr);
2417- return (!vma || (addr + len) <= vma->vm_start);
2418+ return check_heap_stack_gap(vma, addr, len);
2419 }
2420
2421 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2422@@ -256,7 +256,7 @@ full_search:
2423 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2424 continue;
2425 }
2426- if (!vma || addr + len <= vma->vm_start) {
2427+ if (check_heap_stack_gap(vma, addr, len)) {
2428 /*
2429 * Remember the place where we stopped the search:
2430 */
2431@@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2432 }
2433 }
2434
2435- addr = mm->mmap_base;
2436- while (addr > len) {
2437+ if (mm->mmap_base < len)
2438+ addr = -ENOMEM;
2439+ else
2440+ addr = mm->mmap_base - len;
2441+
2442+ while (!IS_ERR_VALUE(addr)) {
2443 /* Go down by chunk size */
2444- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2445+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
2446
2447 /* Check for hit with different page size */
2448 mask = slice_range_to_mask(addr, len);
2449@@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2450 * return with success:
2451 */
2452 vma = find_vma(mm, addr);
2453- if (!vma || (addr + len) <= vma->vm_start) {
2454+ if (check_heap_stack_gap(vma, addr, len)) {
2455 /* remember the address as a hint for next time */
2456 if (use_cache)
2457 mm->free_area_cache = addr;
2458@@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2459 mm->cached_hole_size = vma->vm_start - addr;
2460
2461 /* try just below the current vma->vm_start */
2462- addr = vma->vm_start;
2463+ addr = skip_heap_stack_gap(vma, len);
2464 }
2465
2466 /*
2467@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2468 if (fixed && addr > (mm->task_size - len))
2469 return -EINVAL;
2470
2471+#ifdef CONFIG_PAX_RANDMMAP
2472+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2473+ addr = 0;
2474+#endif
2475+
2476 /* If hint, make sure it matches our alignment restrictions */
2477 if (!fixed && addr) {
2478 addr = _ALIGN_UP(addr, 1ul << pshift);
2479diff -urNp linux-3.0.4/arch/s390/include/asm/elf.h linux-3.0.4/arch/s390/include/asm/elf.h
2480--- linux-3.0.4/arch/s390/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
2481+++ linux-3.0.4/arch/s390/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
2482@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2483 the loader. We need to make sure that it is out of the way of the program
2484 that it will "exec", and that there is sufficient room for the brk. */
2485
2486-extern unsigned long randomize_et_dyn(unsigned long base);
2487-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2488+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2489+
2490+#ifdef CONFIG_PAX_ASLR
2491+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2492+
2493+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2494+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2495+#endif
2496
2497 /* This yields a mask that user programs can use to figure out what
2498 instruction set this CPU supports. */
2499@@ -210,7 +216,4 @@ struct linux_binprm;
2500 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2501 int arch_setup_additional_pages(struct linux_binprm *, int);
2502
2503-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2504-#define arch_randomize_brk arch_randomize_brk
2505-
2506 #endif
2507diff -urNp linux-3.0.4/arch/s390/include/asm/system.h linux-3.0.4/arch/s390/include/asm/system.h
2508--- linux-3.0.4/arch/s390/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2509+++ linux-3.0.4/arch/s390/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2510@@ -255,7 +255,7 @@ extern void (*_machine_restart)(char *co
2511 extern void (*_machine_halt)(void);
2512 extern void (*_machine_power_off)(void);
2513
2514-extern unsigned long arch_align_stack(unsigned long sp);
2515+#define arch_align_stack(x) ((x) & ~0xfUL)
2516
2517 static inline int tprot(unsigned long addr)
2518 {
2519diff -urNp linux-3.0.4/arch/s390/include/asm/uaccess.h linux-3.0.4/arch/s390/include/asm/uaccess.h
2520--- linux-3.0.4/arch/s390/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
2521+++ linux-3.0.4/arch/s390/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
2522@@ -235,6 +235,10 @@ static inline unsigned long __must_check
2523 copy_to_user(void __user *to, const void *from, unsigned long n)
2524 {
2525 might_fault();
2526+
2527+ if ((long)n < 0)
2528+ return n;
2529+
2530 if (access_ok(VERIFY_WRITE, to, n))
2531 n = __copy_to_user(to, from, n);
2532 return n;
2533@@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void
2534 static inline unsigned long __must_check
2535 __copy_from_user(void *to, const void __user *from, unsigned long n)
2536 {
2537+ if ((long)n < 0)
2538+ return n;
2539+
2540 if (__builtin_constant_p(n) && (n <= 256))
2541 return uaccess.copy_from_user_small(n, from, to);
2542 else
2543@@ -294,6 +301,10 @@ copy_from_user(void *to, const void __us
2544 unsigned int sz = __compiletime_object_size(to);
2545
2546 might_fault();
2547+
2548+ if ((long)n < 0)
2549+ return n;
2550+
2551 if (unlikely(sz != -1 && sz < n)) {
2552 copy_from_user_overflow();
2553 return n;
2554diff -urNp linux-3.0.4/arch/s390/kernel/module.c linux-3.0.4/arch/s390/kernel/module.c
2555--- linux-3.0.4/arch/s390/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2556+++ linux-3.0.4/arch/s390/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2557@@ -168,11 +168,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
2558
2559 /* Increase core size by size of got & plt and set start
2560 offsets for got and plt. */
2561- me->core_size = ALIGN(me->core_size, 4);
2562- me->arch.got_offset = me->core_size;
2563- me->core_size += me->arch.got_size;
2564- me->arch.plt_offset = me->core_size;
2565- me->core_size += me->arch.plt_size;
2566+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
2567+ me->arch.got_offset = me->core_size_rw;
2568+ me->core_size_rw += me->arch.got_size;
2569+ me->arch.plt_offset = me->core_size_rx;
2570+ me->core_size_rx += me->arch.plt_size;
2571 return 0;
2572 }
2573
2574@@ -258,7 +258,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2575 if (info->got_initialized == 0) {
2576 Elf_Addr *gotent;
2577
2578- gotent = me->module_core + me->arch.got_offset +
2579+ gotent = me->module_core_rw + me->arch.got_offset +
2580 info->got_offset;
2581 *gotent = val;
2582 info->got_initialized = 1;
2583@@ -282,7 +282,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2584 else if (r_type == R_390_GOTENT ||
2585 r_type == R_390_GOTPLTENT)
2586 *(unsigned int *) loc =
2587- (val + (Elf_Addr) me->module_core - loc) >> 1;
2588+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
2589 else if (r_type == R_390_GOT64 ||
2590 r_type == R_390_GOTPLT64)
2591 *(unsigned long *) loc = val;
2592@@ -296,7 +296,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2593 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
2594 if (info->plt_initialized == 0) {
2595 unsigned int *ip;
2596- ip = me->module_core + me->arch.plt_offset +
2597+ ip = me->module_core_rx + me->arch.plt_offset +
2598 info->plt_offset;
2599 #ifndef CONFIG_64BIT
2600 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
2601@@ -321,7 +321,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2602 val - loc + 0xffffUL < 0x1ffffeUL) ||
2603 (r_type == R_390_PLT32DBL &&
2604 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
2605- val = (Elf_Addr) me->module_core +
2606+ val = (Elf_Addr) me->module_core_rx +
2607 me->arch.plt_offset +
2608 info->plt_offset;
2609 val += rela->r_addend - loc;
2610@@ -343,7 +343,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2611 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
2612 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
2613 val = val + rela->r_addend -
2614- ((Elf_Addr) me->module_core + me->arch.got_offset);
2615+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
2616 if (r_type == R_390_GOTOFF16)
2617 *(unsigned short *) loc = val;
2618 else if (r_type == R_390_GOTOFF32)
2619@@ -353,7 +353,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2620 break;
2621 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
2622 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
2623- val = (Elf_Addr) me->module_core + me->arch.got_offset +
2624+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
2625 rela->r_addend - loc;
2626 if (r_type == R_390_GOTPC)
2627 *(unsigned int *) loc = val;
2628diff -urNp linux-3.0.4/arch/s390/kernel/process.c linux-3.0.4/arch/s390/kernel/process.c
2629--- linux-3.0.4/arch/s390/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2630+++ linux-3.0.4/arch/s390/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2631@@ -319,39 +319,3 @@ unsigned long get_wchan(struct task_stru
2632 }
2633 return 0;
2634 }
2635-
2636-unsigned long arch_align_stack(unsigned long sp)
2637-{
2638- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2639- sp -= get_random_int() & ~PAGE_MASK;
2640- return sp & ~0xf;
2641-}
2642-
2643-static inline unsigned long brk_rnd(void)
2644-{
2645- /* 8MB for 32bit, 1GB for 64bit */
2646- if (is_32bit_task())
2647- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
2648- else
2649- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
2650-}
2651-
2652-unsigned long arch_randomize_brk(struct mm_struct *mm)
2653-{
2654- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
2655-
2656- if (ret < mm->brk)
2657- return mm->brk;
2658- return ret;
2659-}
2660-
2661-unsigned long randomize_et_dyn(unsigned long base)
2662-{
2663- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2664-
2665- if (!(current->flags & PF_RANDOMIZE))
2666- return base;
2667- if (ret < base)
2668- return base;
2669- return ret;
2670-}
2671diff -urNp linux-3.0.4/arch/s390/kernel/setup.c linux-3.0.4/arch/s390/kernel/setup.c
2672--- linux-3.0.4/arch/s390/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
2673+++ linux-3.0.4/arch/s390/kernel/setup.c 2011-08-23 21:47:55.000000000 -0400
2674@@ -271,7 +271,7 @@ static int __init early_parse_mem(char *
2675 }
2676 early_param("mem", early_parse_mem);
2677
2678-unsigned int user_mode = HOME_SPACE_MODE;
2679+unsigned int user_mode = SECONDARY_SPACE_MODE;
2680 EXPORT_SYMBOL_GPL(user_mode);
2681
2682 static int set_amode_and_uaccess(unsigned long user_amode,
2683diff -urNp linux-3.0.4/arch/s390/mm/mmap.c linux-3.0.4/arch/s390/mm/mmap.c
2684--- linux-3.0.4/arch/s390/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
2685+++ linux-3.0.4/arch/s390/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
2686@@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_str
2687 */
2688 if (mmap_is_legacy()) {
2689 mm->mmap_base = TASK_UNMAPPED_BASE;
2690+
2691+#ifdef CONFIG_PAX_RANDMMAP
2692+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2693+ mm->mmap_base += mm->delta_mmap;
2694+#endif
2695+
2696 mm->get_unmapped_area = arch_get_unmapped_area;
2697 mm->unmap_area = arch_unmap_area;
2698 } else {
2699 mm->mmap_base = mmap_base();
2700+
2701+#ifdef CONFIG_PAX_RANDMMAP
2702+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2703+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2704+#endif
2705+
2706 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2707 mm->unmap_area = arch_unmap_area_topdown;
2708 }
2709@@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_str
2710 */
2711 if (mmap_is_legacy()) {
2712 mm->mmap_base = TASK_UNMAPPED_BASE;
2713+
2714+#ifdef CONFIG_PAX_RANDMMAP
2715+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2716+ mm->mmap_base += mm->delta_mmap;
2717+#endif
2718+
2719 mm->get_unmapped_area = s390_get_unmapped_area;
2720 mm->unmap_area = arch_unmap_area;
2721 } else {
2722 mm->mmap_base = mmap_base();
2723+
2724+#ifdef CONFIG_PAX_RANDMMAP
2725+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2726+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2727+#endif
2728+
2729 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
2730 mm->unmap_area = arch_unmap_area_topdown;
2731 }
2732diff -urNp linux-3.0.4/arch/score/include/asm/system.h linux-3.0.4/arch/score/include/asm/system.h
2733--- linux-3.0.4/arch/score/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2734+++ linux-3.0.4/arch/score/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2735@@ -17,7 +17,7 @@ do { \
2736 #define finish_arch_switch(prev) do {} while (0)
2737
2738 typedef void (*vi_handler_t)(void);
2739-extern unsigned long arch_align_stack(unsigned long sp);
2740+#define arch_align_stack(x) (x)
2741
2742 #define mb() barrier()
2743 #define rmb() barrier()
2744diff -urNp linux-3.0.4/arch/score/kernel/process.c linux-3.0.4/arch/score/kernel/process.c
2745--- linux-3.0.4/arch/score/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2746+++ linux-3.0.4/arch/score/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2747@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
2748
2749 return task_pt_regs(task)->cp0_epc;
2750 }
2751-
2752-unsigned long arch_align_stack(unsigned long sp)
2753-{
2754- return sp;
2755-}
2756diff -urNp linux-3.0.4/arch/sh/mm/mmap.c linux-3.0.4/arch/sh/mm/mmap.c
2757--- linux-3.0.4/arch/sh/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
2758+++ linux-3.0.4/arch/sh/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
2759@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
2760 addr = PAGE_ALIGN(addr);
2761
2762 vma = find_vma(mm, addr);
2763- if (TASK_SIZE - len >= addr &&
2764- (!vma || addr + len <= vma->vm_start))
2765+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2766 return addr;
2767 }
2768
2769@@ -106,7 +105,7 @@ full_search:
2770 }
2771 return -ENOMEM;
2772 }
2773- if (likely(!vma || addr + len <= vma->vm_start)) {
2774+ if (likely(check_heap_stack_gap(vma, addr, len))) {
2775 /*
2776 * Remember the place where we stopped the search:
2777 */
2778@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
2779 addr = PAGE_ALIGN(addr);
2780
2781 vma = find_vma(mm, addr);
2782- if (TASK_SIZE - len >= addr &&
2783- (!vma || addr + len <= vma->vm_start))
2784+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2785 return addr;
2786 }
2787
2788@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
2789 /* make sure it can fit in the remaining address space */
2790 if (likely(addr > len)) {
2791 vma = find_vma(mm, addr-len);
2792- if (!vma || addr <= vma->vm_start) {
2793+ if (check_heap_stack_gap(vma, addr - len, len)) {
2794 /* remember the address as a hint for next time */
2795 return (mm->free_area_cache = addr-len);
2796 }
2797@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
2798 if (unlikely(mm->mmap_base < len))
2799 goto bottomup;
2800
2801- addr = mm->mmap_base-len;
2802- if (do_colour_align)
2803- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2804+ addr = mm->mmap_base - len;
2805
2806 do {
2807+ if (do_colour_align)
2808+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2809 /*
2810 * Lookup failure means no vma is above this address,
2811 * else if new region fits below vma->vm_start,
2812 * return with success:
2813 */
2814 vma = find_vma(mm, addr);
2815- if (likely(!vma || addr+len <= vma->vm_start)) {
2816+ if (likely(check_heap_stack_gap(vma, addr, len))) {
2817 /* remember the address as a hint for next time */
2818 return (mm->free_area_cache = addr);
2819 }
2820@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
2821 mm->cached_hole_size = vma->vm_start - addr;
2822
2823 /* try just below the current vma->vm_start */
2824- addr = vma->vm_start-len;
2825- if (do_colour_align)
2826- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2827- } while (likely(len < vma->vm_start));
2828+ addr = skip_heap_stack_gap(vma, len);
2829+ } while (!IS_ERR_VALUE(addr));
2830
2831 bottomup:
2832 /*
2833diff -urNp linux-3.0.4/arch/sparc/include/asm/atomic_64.h linux-3.0.4/arch/sparc/include/asm/atomic_64.h
2834--- linux-3.0.4/arch/sparc/include/asm/atomic_64.h 2011-07-21 22:17:23.000000000 -0400
2835+++ linux-3.0.4/arch/sparc/include/asm/atomic_64.h 2011-08-23 21:48:14.000000000 -0400
2836@@ -14,18 +14,40 @@
2837 #define ATOMIC64_INIT(i) { (i) }
2838
2839 #define atomic_read(v) (*(volatile int *)&(v)->counter)
2840+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
2841+{
2842+ return v->counter;
2843+}
2844 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
2845+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
2846+{
2847+ return v->counter;
2848+}
2849
2850 #define atomic_set(v, i) (((v)->counter) = i)
2851+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
2852+{
2853+ v->counter = i;
2854+}
2855 #define atomic64_set(v, i) (((v)->counter) = i)
2856+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
2857+{
2858+ v->counter = i;
2859+}
2860
2861 extern void atomic_add(int, atomic_t *);
2862+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
2863 extern void atomic64_add(long, atomic64_t *);
2864+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
2865 extern void atomic_sub(int, atomic_t *);
2866+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
2867 extern void atomic64_sub(long, atomic64_t *);
2868+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
2869
2870 extern int atomic_add_ret(int, atomic_t *);
2871+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
2872 extern long atomic64_add_ret(long, atomic64_t *);
2873+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
2874 extern int atomic_sub_ret(int, atomic_t *);
2875 extern long atomic64_sub_ret(long, atomic64_t *);
2876
2877@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
2878 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
2879
2880 #define atomic_inc_return(v) atomic_add_ret(1, v)
2881+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
2882+{
2883+ return atomic_add_ret_unchecked(1, v);
2884+}
2885 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
2886+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
2887+{
2888+ return atomic64_add_ret_unchecked(1, v);
2889+}
2890
2891 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
2892 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
2893
2894 #define atomic_add_return(i, v) atomic_add_ret(i, v)
2895+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
2896+{
2897+ return atomic_add_ret_unchecked(i, v);
2898+}
2899 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
2900+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
2901+{
2902+ return atomic64_add_ret_unchecked(i, v);
2903+}
2904
2905 /*
2906 * atomic_inc_and_test - increment and test
2907@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
2908 * other cases.
2909 */
2910 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
2911+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
2912+{
2913+ return atomic_inc_return_unchecked(v) == 0;
2914+}
2915 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
2916
2917 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
2918@@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomi
2919 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
2920
2921 #define atomic_inc(v) atomic_add(1, v)
2922+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
2923+{
2924+ atomic_add_unchecked(1, v);
2925+}
2926 #define atomic64_inc(v) atomic64_add(1, v)
2927+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
2928+{
2929+ atomic64_add_unchecked(1, v);
2930+}
2931
2932 #define atomic_dec(v) atomic_sub(1, v)
2933+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
2934+{
2935+ atomic_sub_unchecked(1, v);
2936+}
2937 #define atomic64_dec(v) atomic64_sub(1, v)
2938+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
2939+{
2940+ atomic64_sub_unchecked(1, v);
2941+}
2942
2943 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
2944 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
2945
2946 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2947+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
2948+{
2949+ return cmpxchg(&v->counter, old, new);
2950+}
2951 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
2952+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
2953+{
2954+ return xchg(&v->counter, new);
2955+}
2956
2957 static inline int atomic_add_unless(atomic_t *v, int a, int u)
2958 {
2959- int c, old;
2960+ int c, old, new;
2961 c = atomic_read(v);
2962 for (;;) {
2963- if (unlikely(c == (u)))
2964+ if (unlikely(c == u))
2965 break;
2966- old = atomic_cmpxchg((v), c, c + (a));
2967+
2968+ asm volatile("addcc %2, %0, %0\n"
2969+
2970+#ifdef CONFIG_PAX_REFCOUNT
2971+ "tvs %%icc, 6\n"
2972+#endif
2973+
2974+ : "=r" (new)
2975+ : "0" (c), "ir" (a)
2976+ : "cc");
2977+
2978+ old = atomic_cmpxchg(v, c, new);
2979 if (likely(old == c))
2980 break;
2981 c = old;
2982 }
2983- return c != (u);
2984+ return c != u;
2985 }
2986
2987 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
2988@@ -90,20 +167,35 @@ static inline int atomic_add_unless(atom
2989 #define atomic64_cmpxchg(v, o, n) \
2990 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
2991 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
2992+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
2993+{
2994+ return xchg(&v->counter, new);
2995+}
2996
2997 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
2998 {
2999- long c, old;
3000+ long c, old, new;
3001 c = atomic64_read(v);
3002 for (;;) {
3003- if (unlikely(c == (u)))
3004+ if (unlikely(c == u))
3005 break;
3006- old = atomic64_cmpxchg((v), c, c + (a));
3007+
3008+ asm volatile("addcc %2, %0, %0\n"
3009+
3010+#ifdef CONFIG_PAX_REFCOUNT
3011+ "tvs %%xcc, 6\n"
3012+#endif
3013+
3014+ : "=r" (new)
3015+ : "0" (c), "ir" (a)
3016+ : "cc");
3017+
3018+ old = atomic64_cmpxchg(v, c, new);
3019 if (likely(old == c))
3020 break;
3021 c = old;
3022 }
3023- return c != (u);
3024+ return c != u;
3025 }
3026
3027 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3028diff -urNp linux-3.0.4/arch/sparc/include/asm/cache.h linux-3.0.4/arch/sparc/include/asm/cache.h
3029--- linux-3.0.4/arch/sparc/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
3030+++ linux-3.0.4/arch/sparc/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
3031@@ -10,7 +10,7 @@
3032 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3033
3034 #define L1_CACHE_SHIFT 5
3035-#define L1_CACHE_BYTES 32
3036+#define L1_CACHE_BYTES 32UL
3037
3038 #ifdef CONFIG_SPARC32
3039 #define SMP_CACHE_BYTES_SHIFT 5
3040diff -urNp linux-3.0.4/arch/sparc/include/asm/elf_32.h linux-3.0.4/arch/sparc/include/asm/elf_32.h
3041--- linux-3.0.4/arch/sparc/include/asm/elf_32.h 2011-07-21 22:17:23.000000000 -0400
3042+++ linux-3.0.4/arch/sparc/include/asm/elf_32.h 2011-08-23 21:47:55.000000000 -0400
3043@@ -114,6 +114,13 @@ typedef struct {
3044
3045 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3046
3047+#ifdef CONFIG_PAX_ASLR
3048+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3049+
3050+#define PAX_DELTA_MMAP_LEN 16
3051+#define PAX_DELTA_STACK_LEN 16
3052+#endif
3053+
3054 /* This yields a mask that user programs can use to figure out what
3055 instruction set this cpu supports. This can NOT be done in userspace
3056 on Sparc. */
3057diff -urNp linux-3.0.4/arch/sparc/include/asm/elf_64.h linux-3.0.4/arch/sparc/include/asm/elf_64.h
3058--- linux-3.0.4/arch/sparc/include/asm/elf_64.h 2011-08-23 21:44:40.000000000 -0400
3059+++ linux-3.0.4/arch/sparc/include/asm/elf_64.h 2011-08-23 21:47:55.000000000 -0400
3060@@ -180,6 +180,13 @@ typedef struct {
3061 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3062 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3063
3064+#ifdef CONFIG_PAX_ASLR
3065+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3066+
3067+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3068+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3069+#endif
3070+
3071 extern unsigned long sparc64_elf_hwcap;
3072 #define ELF_HWCAP sparc64_elf_hwcap
3073
3074diff -urNp linux-3.0.4/arch/sparc/include/asm/pgtable_32.h linux-3.0.4/arch/sparc/include/asm/pgtable_32.h
3075--- linux-3.0.4/arch/sparc/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
3076+++ linux-3.0.4/arch/sparc/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
3077@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3078 BTFIXUPDEF_INT(page_none)
3079 BTFIXUPDEF_INT(page_copy)
3080 BTFIXUPDEF_INT(page_readonly)
3081+
3082+#ifdef CONFIG_PAX_PAGEEXEC
3083+BTFIXUPDEF_INT(page_shared_noexec)
3084+BTFIXUPDEF_INT(page_copy_noexec)
3085+BTFIXUPDEF_INT(page_readonly_noexec)
3086+#endif
3087+
3088 BTFIXUPDEF_INT(page_kernel)
3089
3090 #define PMD_SHIFT SUN4C_PMD_SHIFT
3091@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3092 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3093 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3094
3095+#ifdef CONFIG_PAX_PAGEEXEC
3096+extern pgprot_t PAGE_SHARED_NOEXEC;
3097+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3098+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3099+#else
3100+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3101+# define PAGE_COPY_NOEXEC PAGE_COPY
3102+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3103+#endif
3104+
3105 extern unsigned long page_kernel;
3106
3107 #ifdef MODULE
3108diff -urNp linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h
3109--- linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h 2011-07-21 22:17:23.000000000 -0400
3110+++ linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h 2011-08-23 21:47:55.000000000 -0400
3111@@ -115,6 +115,13 @@
3112 SRMMU_EXEC | SRMMU_REF)
3113 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3114 SRMMU_EXEC | SRMMU_REF)
3115+
3116+#ifdef CONFIG_PAX_PAGEEXEC
3117+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3118+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3119+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3120+#endif
3121+
3122 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3123 SRMMU_DIRTY | SRMMU_REF)
3124
3125diff -urNp linux-3.0.4/arch/sparc/include/asm/spinlock_64.h linux-3.0.4/arch/sparc/include/asm/spinlock_64.h
3126--- linux-3.0.4/arch/sparc/include/asm/spinlock_64.h 2011-07-21 22:17:23.000000000 -0400
3127+++ linux-3.0.4/arch/sparc/include/asm/spinlock_64.h 2011-08-23 21:47:55.000000000 -0400
3128@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(
3129
3130 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3131
3132-static void inline arch_read_lock(arch_rwlock_t *lock)
3133+static inline void arch_read_lock(arch_rwlock_t *lock)
3134 {
3135 unsigned long tmp1, tmp2;
3136
3137 __asm__ __volatile__ (
3138 "1: ldsw [%2], %0\n"
3139 " brlz,pn %0, 2f\n"
3140-"4: add %0, 1, %1\n"
3141+"4: addcc %0, 1, %1\n"
3142+
3143+#ifdef CONFIG_PAX_REFCOUNT
3144+" tvs %%icc, 6\n"
3145+#endif
3146+
3147 " cas [%2], %0, %1\n"
3148 " cmp %0, %1\n"
3149 " bne,pn %%icc, 1b\n"
3150@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r
3151 " .previous"
3152 : "=&r" (tmp1), "=&r" (tmp2)
3153 : "r" (lock)
3154- : "memory");
3155+ : "memory", "cc");
3156 }
3157
3158-static int inline arch_read_trylock(arch_rwlock_t *lock)
3159+static inline int arch_read_trylock(arch_rwlock_t *lock)
3160 {
3161 int tmp1, tmp2;
3162
3163@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
3164 "1: ldsw [%2], %0\n"
3165 " brlz,a,pn %0, 2f\n"
3166 " mov 0, %0\n"
3167-" add %0, 1, %1\n"
3168+" addcc %0, 1, %1\n"
3169+
3170+#ifdef CONFIG_PAX_REFCOUNT
3171+" tvs %%icc, 6\n"
3172+#endif
3173+
3174 " cas [%2], %0, %1\n"
3175 " cmp %0, %1\n"
3176 " bne,pn %%icc, 1b\n"
3177@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch
3178 return tmp1;
3179 }
3180
3181-static void inline arch_read_unlock(arch_rwlock_t *lock)
3182+static inline void arch_read_unlock(arch_rwlock_t *lock)
3183 {
3184 unsigned long tmp1, tmp2;
3185
3186 __asm__ __volatile__(
3187 "1: lduw [%2], %0\n"
3188-" sub %0, 1, %1\n"
3189+" subcc %0, 1, %1\n"
3190+
3191+#ifdef CONFIG_PAX_REFCOUNT
3192+" tvs %%icc, 6\n"
3193+#endif
3194+
3195 " cas [%2], %0, %1\n"
3196 " cmp %0, %1\n"
3197 " bne,pn %%xcc, 1b\n"
3198@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch
3199 : "memory");
3200 }
3201
3202-static void inline arch_write_lock(arch_rwlock_t *lock)
3203+static inline void arch_write_lock(arch_rwlock_t *lock)
3204 {
3205 unsigned long mask, tmp1, tmp2;
3206
3207@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_
3208 : "memory");
3209 }
3210
3211-static void inline arch_write_unlock(arch_rwlock_t *lock)
3212+static inline void arch_write_unlock(arch_rwlock_t *lock)
3213 {
3214 __asm__ __volatile__(
3215 " stw %%g0, [%0]"
3216@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc
3217 : "memory");
3218 }
3219
3220-static int inline arch_write_trylock(arch_rwlock_t *lock)
3221+static inline int arch_write_trylock(arch_rwlock_t *lock)
3222 {
3223 unsigned long mask, tmp1, tmp2, result;
3224
3225diff -urNp linux-3.0.4/arch/sparc/include/asm/thread_info_32.h linux-3.0.4/arch/sparc/include/asm/thread_info_32.h
3226--- linux-3.0.4/arch/sparc/include/asm/thread_info_32.h 2011-07-21 22:17:23.000000000 -0400
3227+++ linux-3.0.4/arch/sparc/include/asm/thread_info_32.h 2011-08-23 21:47:55.000000000 -0400
3228@@ -50,6 +50,8 @@ struct thread_info {
3229 unsigned long w_saved;
3230
3231 struct restart_block restart_block;
3232+
3233+ unsigned long lowest_stack;
3234 };
3235
3236 /*
3237diff -urNp linux-3.0.4/arch/sparc/include/asm/thread_info_64.h linux-3.0.4/arch/sparc/include/asm/thread_info_64.h
3238--- linux-3.0.4/arch/sparc/include/asm/thread_info_64.h 2011-07-21 22:17:23.000000000 -0400
3239+++ linux-3.0.4/arch/sparc/include/asm/thread_info_64.h 2011-08-23 21:47:55.000000000 -0400
3240@@ -63,6 +63,8 @@ struct thread_info {
3241 struct pt_regs *kern_una_regs;
3242 unsigned int kern_una_insn;
3243
3244+ unsigned long lowest_stack;
3245+
3246 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3247 };
3248
3249diff -urNp linux-3.0.4/arch/sparc/include/asm/uaccess_32.h linux-3.0.4/arch/sparc/include/asm/uaccess_32.h
3250--- linux-3.0.4/arch/sparc/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
3251+++ linux-3.0.4/arch/sparc/include/asm/uaccess_32.h 2011-08-23 21:47:55.000000000 -0400
3252@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3253
3254 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3255 {
3256- if (n && __access_ok((unsigned long) to, n))
3257+ if ((long)n < 0)
3258+ return n;
3259+
3260+ if (n && __access_ok((unsigned long) to, n)) {
3261+ if (!__builtin_constant_p(n))
3262+ check_object_size(from, n, true);
3263 return __copy_user(to, (__force void __user *) from, n);
3264- else
3265+ } else
3266 return n;
3267 }
3268
3269 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3270 {
3271+ if ((long)n < 0)
3272+ return n;
3273+
3274+ if (!__builtin_constant_p(n))
3275+ check_object_size(from, n, true);
3276+
3277 return __copy_user(to, (__force void __user *) from, n);
3278 }
3279
3280 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3281 {
3282- if (n && __access_ok((unsigned long) from, n))
3283+ if ((long)n < 0)
3284+ return n;
3285+
3286+ if (n && __access_ok((unsigned long) from, n)) {
3287+ if (!__builtin_constant_p(n))
3288+ check_object_size(to, n, false);
3289 return __copy_user((__force void __user *) to, from, n);
3290- else
3291+ } else
3292 return n;
3293 }
3294
3295 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3296 {
3297+ if ((long)n < 0)
3298+ return n;
3299+
3300 return __copy_user((__force void __user *) to, from, n);
3301 }
3302
3303diff -urNp linux-3.0.4/arch/sparc/include/asm/uaccess_64.h linux-3.0.4/arch/sparc/include/asm/uaccess_64.h
3304--- linux-3.0.4/arch/sparc/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
3305+++ linux-3.0.4/arch/sparc/include/asm/uaccess_64.h 2011-08-23 21:47:55.000000000 -0400
3306@@ -10,6 +10,7 @@
3307 #include <linux/compiler.h>
3308 #include <linux/string.h>
3309 #include <linux/thread_info.h>
3310+#include <linux/kernel.h>
3311 #include <asm/asi.h>
3312 #include <asm/system.h>
3313 #include <asm/spitfire.h>
3314@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixu
3315 static inline unsigned long __must_check
3316 copy_from_user(void *to, const void __user *from, unsigned long size)
3317 {
3318- unsigned long ret = ___copy_from_user(to, from, size);
3319+ unsigned long ret;
3320
3321+ if ((long)size < 0 || size > INT_MAX)
3322+ return size;
3323+
3324+ if (!__builtin_constant_p(size))
3325+ check_object_size(to, size, false);
3326+
3327+ ret = ___copy_from_user(to, from, size);
3328 if (unlikely(ret))
3329 ret = copy_from_user_fixup(to, from, size);
3330
3331@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(
3332 static inline unsigned long __must_check
3333 copy_to_user(void __user *to, const void *from, unsigned long size)
3334 {
3335- unsigned long ret = ___copy_to_user(to, from, size);
3336+ unsigned long ret;
3337+
3338+ if ((long)size < 0 || size > INT_MAX)
3339+ return size;
3340+
3341+ if (!__builtin_constant_p(size))
3342+ check_object_size(from, size, true);
3343
3344+ ret = ___copy_to_user(to, from, size);
3345 if (unlikely(ret))
3346 ret = copy_to_user_fixup(to, from, size);
3347 return ret;
3348diff -urNp linux-3.0.4/arch/sparc/include/asm/uaccess.h linux-3.0.4/arch/sparc/include/asm/uaccess.h
3349--- linux-3.0.4/arch/sparc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
3350+++ linux-3.0.4/arch/sparc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
3351@@ -1,5 +1,13 @@
3352 #ifndef ___ASM_SPARC_UACCESS_H
3353 #define ___ASM_SPARC_UACCESS_H
3354+
3355+#ifdef __KERNEL__
3356+#ifndef __ASSEMBLY__
3357+#include <linux/types.h>
3358+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3359+#endif
3360+#endif
3361+
3362 #if defined(__sparc__) && defined(__arch64__)
3363 #include <asm/uaccess_64.h>
3364 #else
3365diff -urNp linux-3.0.4/arch/sparc/kernel/Makefile linux-3.0.4/arch/sparc/kernel/Makefile
3366--- linux-3.0.4/arch/sparc/kernel/Makefile 2011-07-21 22:17:23.000000000 -0400
3367+++ linux-3.0.4/arch/sparc/kernel/Makefile 2011-08-23 21:47:55.000000000 -0400
3368@@ -3,7 +3,7 @@
3369 #
3370
3371 asflags-y := -ansi
3372-ccflags-y := -Werror
3373+#ccflags-y := -Werror
3374
3375 extra-y := head_$(BITS).o
3376 extra-y += init_task.o
3377diff -urNp linux-3.0.4/arch/sparc/kernel/process_32.c linux-3.0.4/arch/sparc/kernel/process_32.c
3378--- linux-3.0.4/arch/sparc/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
3379+++ linux-3.0.4/arch/sparc/kernel/process_32.c 2011-08-23 21:48:14.000000000 -0400
3380@@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3381 rw->ins[4], rw->ins[5],
3382 rw->ins[6],
3383 rw->ins[7]);
3384- printk("%pS\n", (void *) rw->ins[7]);
3385+ printk("%pA\n", (void *) rw->ins[7]);
3386 rw = (struct reg_window32 *) rw->ins[6];
3387 }
3388 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3389@@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3390
3391 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3392 r->psr, r->pc, r->npc, r->y, print_tainted());
3393- printk("PC: <%pS>\n", (void *) r->pc);
3394+ printk("PC: <%pA>\n", (void *) r->pc);
3395 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3396 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3397 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3398 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3399 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3400 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3401- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3402+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3403
3404 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3405 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3406@@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk,
3407 rw = (struct reg_window32 *) fp;
3408 pc = rw->ins[7];
3409 printk("[%08lx : ", pc);
3410- printk("%pS ] ", (void *) pc);
3411+ printk("%pA ] ", (void *) pc);
3412 fp = rw->ins[6];
3413 } while (++count < 16);
3414 printk("\n");
3415diff -urNp linux-3.0.4/arch/sparc/kernel/process_64.c linux-3.0.4/arch/sparc/kernel/process_64.c
3416--- linux-3.0.4/arch/sparc/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
3417+++ linux-3.0.4/arch/sparc/kernel/process_64.c 2011-08-23 21:48:14.000000000 -0400
3418@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
3419 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3420 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3421 if (regs->tstate & TSTATE_PRIV)
3422- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3423+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3424 }
3425
3426 void show_regs(struct pt_regs *regs)
3427 {
3428 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3429 regs->tpc, regs->tnpc, regs->y, print_tainted());
3430- printk("TPC: <%pS>\n", (void *) regs->tpc);
3431+ printk("TPC: <%pA>\n", (void *) regs->tpc);
3432 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3433 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3434 regs->u_regs[3]);
3435@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3436 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3437 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3438 regs->u_regs[15]);
3439- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3440+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3441 show_regwindow(regs);
3442 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3443 }
3444@@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void
3445 ((tp && tp->task) ? tp->task->pid : -1));
3446
3447 if (gp->tstate & TSTATE_PRIV) {
3448- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3449+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3450 (void *) gp->tpc,
3451 (void *) gp->o7,
3452 (void *) gp->i7,
3453diff -urNp linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c
3454--- linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c 2011-07-21 22:17:23.000000000 -0400
3455+++ linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c 2011-08-23 21:47:55.000000000 -0400
3456@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
3457 if (ARCH_SUN4C && len > 0x20000000)
3458 return -ENOMEM;
3459 if (!addr)
3460- addr = TASK_UNMAPPED_BASE;
3461+ addr = current->mm->mmap_base;
3462
3463 if (flags & MAP_SHARED)
3464 addr = COLOUR_ALIGN(addr);
3465@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str
3466 }
3467 if (TASK_SIZE - PAGE_SIZE - len < addr)
3468 return -ENOMEM;
3469- if (!vmm || addr + len <= vmm->vm_start)
3470+ if (check_heap_stack_gap(vmm, addr, len))
3471 return addr;
3472 addr = vmm->vm_end;
3473 if (flags & MAP_SHARED)
3474diff -urNp linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c
3475--- linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c 2011-07-21 22:17:23.000000000 -0400
3476+++ linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c 2011-08-23 21:47:55.000000000 -0400
3477@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
3478 /* We do not accept a shared mapping if it would violate
3479 * cache aliasing constraints.
3480 */
3481- if ((flags & MAP_SHARED) &&
3482+ if ((filp || (flags & MAP_SHARED)) &&
3483 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3484 return -EINVAL;
3485 return addr;
3486@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
3487 if (filp || (flags & MAP_SHARED))
3488 do_color_align = 1;
3489
3490+#ifdef CONFIG_PAX_RANDMMAP
3491+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3492+#endif
3493+
3494 if (addr) {
3495 if (do_color_align)
3496 addr = COLOUR_ALIGN(addr, pgoff);
3497@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
3498 addr = PAGE_ALIGN(addr);
3499
3500 vma = find_vma(mm, addr);
3501- if (task_size - len >= addr &&
3502- (!vma || addr + len <= vma->vm_start))
3503+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3504 return addr;
3505 }
3506
3507 if (len > mm->cached_hole_size) {
3508- start_addr = addr = mm->free_area_cache;
3509+ start_addr = addr = mm->free_area_cache;
3510 } else {
3511- start_addr = addr = TASK_UNMAPPED_BASE;
3512+ start_addr = addr = mm->mmap_base;
3513 mm->cached_hole_size = 0;
3514 }
3515
3516@@ -174,14 +177,14 @@ full_search:
3517 vma = find_vma(mm, VA_EXCLUDE_END);
3518 }
3519 if (unlikely(task_size < addr)) {
3520- if (start_addr != TASK_UNMAPPED_BASE) {
3521- start_addr = addr = TASK_UNMAPPED_BASE;
3522+ if (start_addr != mm->mmap_base) {
3523+ start_addr = addr = mm->mmap_base;
3524 mm->cached_hole_size = 0;
3525 goto full_search;
3526 }
3527 return -ENOMEM;
3528 }
3529- if (likely(!vma || addr + len <= vma->vm_start)) {
3530+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3531 /*
3532 * Remember the place where we stopped the search:
3533 */
3534@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
3535 /* We do not accept a shared mapping if it would violate
3536 * cache aliasing constraints.
3537 */
3538- if ((flags & MAP_SHARED) &&
3539+ if ((filp || (flags & MAP_SHARED)) &&
3540 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3541 return -EINVAL;
3542 return addr;
3543@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
3544 addr = PAGE_ALIGN(addr);
3545
3546 vma = find_vma(mm, addr);
3547- if (task_size - len >= addr &&
3548- (!vma || addr + len <= vma->vm_start))
3549+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3550 return addr;
3551 }
3552
3553@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
3554 /* make sure it can fit in the remaining address space */
3555 if (likely(addr > len)) {
3556 vma = find_vma(mm, addr-len);
3557- if (!vma || addr <= vma->vm_start) {
3558+ if (check_heap_stack_gap(vma, addr - len, len)) {
3559 /* remember the address as a hint for next time */
3560 return (mm->free_area_cache = addr-len);
3561 }
3562@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct fi
3563 if (unlikely(mm->mmap_base < len))
3564 goto bottomup;
3565
3566- addr = mm->mmap_base-len;
3567- if (do_color_align)
3568- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3569+ addr = mm->mmap_base - len;
3570
3571 do {
3572+ if (do_color_align)
3573+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3574 /*
3575 * Lookup failure means no vma is above this address,
3576 * else if new region fits below vma->vm_start,
3577 * return with success:
3578 */
3579 vma = find_vma(mm, addr);
3580- if (likely(!vma || addr+len <= vma->vm_start)) {
3581+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3582 /* remember the address as a hint for next time */
3583 return (mm->free_area_cache = addr);
3584 }
3585@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct fi
3586 mm->cached_hole_size = vma->vm_start - addr;
3587
3588 /* try just below the current vma->vm_start */
3589- addr = vma->vm_start-len;
3590- if (do_color_align)
3591- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3592- } while (likely(len < vma->vm_start));
3593+ addr = skip_heap_stack_gap(vma, len);
3594+ } while (!IS_ERR_VALUE(addr));
3595
3596 bottomup:
3597 /*
3598@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_str
3599 gap == RLIM_INFINITY ||
3600 sysctl_legacy_va_layout) {
3601 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3602+
3603+#ifdef CONFIG_PAX_RANDMMAP
3604+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3605+ mm->mmap_base += mm->delta_mmap;
3606+#endif
3607+
3608 mm->get_unmapped_area = arch_get_unmapped_area;
3609 mm->unmap_area = arch_unmap_area;
3610 } else {
3611@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
3612 gap = (task_size / 6 * 5);
3613
3614 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
3615+
3616+#ifdef CONFIG_PAX_RANDMMAP
3617+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3618+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3619+#endif
3620+
3621 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3622 mm->unmap_area = arch_unmap_area_topdown;
3623 }
3624diff -urNp linux-3.0.4/arch/sparc/kernel/traps_32.c linux-3.0.4/arch/sparc/kernel/traps_32.c
3625--- linux-3.0.4/arch/sparc/kernel/traps_32.c 2011-07-21 22:17:23.000000000 -0400
3626+++ linux-3.0.4/arch/sparc/kernel/traps_32.c 2011-08-23 21:48:14.000000000 -0400
3627@@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
3628 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
3629 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
3630
3631+extern void gr_handle_kernel_exploit(void);
3632+
3633 void die_if_kernel(char *str, struct pt_regs *regs)
3634 {
3635 static int die_counter;
3636@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
3637 count++ < 30 &&
3638 (((unsigned long) rw) >= PAGE_OFFSET) &&
3639 !(((unsigned long) rw) & 0x7)) {
3640- printk("Caller[%08lx]: %pS\n", rw->ins[7],
3641+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
3642 (void *) rw->ins[7]);
3643 rw = (struct reg_window32 *)rw->ins[6];
3644 }
3645 }
3646 printk("Instruction DUMP:");
3647 instruction_dump ((unsigned long *) regs->pc);
3648- if(regs->psr & PSR_PS)
3649+ if(regs->psr & PSR_PS) {
3650+ gr_handle_kernel_exploit();
3651 do_exit(SIGKILL);
3652+ }
3653 do_exit(SIGSEGV);
3654 }
3655
3656diff -urNp linux-3.0.4/arch/sparc/kernel/traps_64.c linux-3.0.4/arch/sparc/kernel/traps_64.c
3657--- linux-3.0.4/arch/sparc/kernel/traps_64.c 2011-07-21 22:17:23.000000000 -0400
3658+++ linux-3.0.4/arch/sparc/kernel/traps_64.c 2011-08-23 21:48:14.000000000 -0400
3659@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_
3660 i + 1,
3661 p->trapstack[i].tstate, p->trapstack[i].tpc,
3662 p->trapstack[i].tnpc, p->trapstack[i].tt);
3663- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
3664+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
3665 }
3666 }
3667
3668@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
3669
3670 lvl -= 0x100;
3671 if (regs->tstate & TSTATE_PRIV) {
3672+
3673+#ifdef CONFIG_PAX_REFCOUNT
3674+ if (lvl == 6)
3675+ pax_report_refcount_overflow(regs);
3676+#endif
3677+
3678 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
3679 die_if_kernel(buffer, regs);
3680 }
3681@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
3682 void bad_trap_tl1(struct pt_regs *regs, long lvl)
3683 {
3684 char buffer[32];
3685-
3686+
3687 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
3688 0, lvl, SIGTRAP) == NOTIFY_STOP)
3689 return;
3690
3691+#ifdef CONFIG_PAX_REFCOUNT
3692+ if (lvl == 6)
3693+ pax_report_refcount_overflow(regs);
3694+#endif
3695+
3696 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
3697
3698 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
3699@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt
3700 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
3701 printk("%s" "ERROR(%d): ",
3702 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
3703- printk("TPC<%pS>\n", (void *) regs->tpc);
3704+ printk("TPC<%pA>\n", (void *) regs->tpc);
3705 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
3706 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
3707 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
3708@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type,
3709 smp_processor_id(),
3710 (type & 0x1) ? 'I' : 'D',
3711 regs->tpc);
3712- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
3713+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
3714 panic("Irrecoverable Cheetah+ parity error.");
3715 }
3716
3717@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type,
3718 smp_processor_id(),
3719 (type & 0x1) ? 'I' : 'D',
3720 regs->tpc);
3721- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
3722+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
3723 }
3724
3725 struct sun4v_error_entry {
3726@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_r
3727
3728 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
3729 regs->tpc, tl);
3730- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
3731+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
3732 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3733- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
3734+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
3735 (void *) regs->u_regs[UREG_I7]);
3736 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
3737 "pte[%lx] error[%lx]\n",
3738@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_r
3739
3740 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
3741 regs->tpc, tl);
3742- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
3743+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
3744 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3745- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
3746+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
3747 (void *) regs->u_regs[UREG_I7]);
3748 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
3749 "pte[%lx] error[%lx]\n",
3750@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk,
3751 fp = (unsigned long)sf->fp + STACK_BIAS;
3752 }
3753
3754- printk(" [%016lx] %pS\n", pc, (void *) pc);
3755+ printk(" [%016lx] %pA\n", pc, (void *) pc);
3756 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3757 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
3758 int index = tsk->curr_ret_stack;
3759 if (tsk->ret_stack && index >= graph) {
3760 pc = tsk->ret_stack[index - graph].ret;
3761- printk(" [%016lx] %pS\n", pc, (void *) pc);
3762+ printk(" [%016lx] %pA\n", pc, (void *) pc);
3763 graph++;
3764 }
3765 }
3766@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_
3767 return (struct reg_window *) (fp + STACK_BIAS);
3768 }
3769
3770+extern void gr_handle_kernel_exploit(void);
3771+
3772 void die_if_kernel(char *str, struct pt_regs *regs)
3773 {
3774 static int die_counter;
3775@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_
3776 while (rw &&
3777 count++ < 30 &&
3778 kstack_valid(tp, (unsigned long) rw)) {
3779- printk("Caller[%016lx]: %pS\n", rw->ins[7],
3780+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
3781 (void *) rw->ins[7]);
3782
3783 rw = kernel_stack_up(rw);
3784@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_
3785 }
3786 user_instruction_dump ((unsigned int __user *) regs->tpc);
3787 }
3788- if (regs->tstate & TSTATE_PRIV)
3789+ if (regs->tstate & TSTATE_PRIV) {
3790+ gr_handle_kernel_exploit();
3791 do_exit(SIGKILL);
3792+ }
3793 do_exit(SIGSEGV);
3794 }
3795 EXPORT_SYMBOL(die_if_kernel);
3796diff -urNp linux-3.0.4/arch/sparc/kernel/unaligned_64.c linux-3.0.4/arch/sparc/kernel/unaligned_64.c
3797--- linux-3.0.4/arch/sparc/kernel/unaligned_64.c 2011-08-23 21:44:40.000000000 -0400
3798+++ linux-3.0.4/arch/sparc/kernel/unaligned_64.c 2011-08-23 21:48:14.000000000 -0400
3799@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs
3800 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
3801
3802 if (__ratelimit(&ratelimit)) {
3803- printk("Kernel unaligned access at TPC[%lx] %pS\n",
3804+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
3805 regs->tpc, (void *) regs->tpc);
3806 }
3807 }
3808diff -urNp linux-3.0.4/arch/sparc/lib/atomic_64.S linux-3.0.4/arch/sparc/lib/atomic_64.S
3809--- linux-3.0.4/arch/sparc/lib/atomic_64.S 2011-07-21 22:17:23.000000000 -0400
3810+++ linux-3.0.4/arch/sparc/lib/atomic_64.S 2011-08-23 21:47:55.000000000 -0400
3811@@ -18,7 +18,12 @@
3812 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
3813 BACKOFF_SETUP(%o2)
3814 1: lduw [%o1], %g1
3815- add %g1, %o0, %g7
3816+ addcc %g1, %o0, %g7
3817+
3818+#ifdef CONFIG_PAX_REFCOUNT
3819+ tvs %icc, 6
3820+#endif
3821+
3822 cas [%o1], %g1, %g7
3823 cmp %g1, %g7
3824 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3825@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
3826 2: BACKOFF_SPIN(%o2, %o3, 1b)
3827 .size atomic_add, .-atomic_add
3828
3829+ .globl atomic_add_unchecked
3830+ .type atomic_add_unchecked,#function
3831+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3832+ BACKOFF_SETUP(%o2)
3833+1: lduw [%o1], %g1
3834+ add %g1, %o0, %g7
3835+ cas [%o1], %g1, %g7
3836+ cmp %g1, %g7
3837+ bne,pn %icc, 2f
3838+ nop
3839+ retl
3840+ nop
3841+2: BACKOFF_SPIN(%o2, %o3, 1b)
3842+ .size atomic_add_unchecked, .-atomic_add_unchecked
3843+
3844 .globl atomic_sub
3845 .type atomic_sub,#function
3846 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3847 BACKOFF_SETUP(%o2)
3848 1: lduw [%o1], %g1
3849- sub %g1, %o0, %g7
3850+ subcc %g1, %o0, %g7
3851+
3852+#ifdef CONFIG_PAX_REFCOUNT
3853+ tvs %icc, 6
3854+#endif
3855+
3856 cas [%o1], %g1, %g7
3857 cmp %g1, %g7
3858 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3859@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
3860 2: BACKOFF_SPIN(%o2, %o3, 1b)
3861 .size atomic_sub, .-atomic_sub
3862
3863+ .globl atomic_sub_unchecked
3864+ .type atomic_sub_unchecked,#function
3865+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3866+ BACKOFF_SETUP(%o2)
3867+1: lduw [%o1], %g1
3868+ sub %g1, %o0, %g7
3869+ cas [%o1], %g1, %g7
3870+ cmp %g1, %g7
3871+ bne,pn %icc, 2f
3872+ nop
3873+ retl
3874+ nop
3875+2: BACKOFF_SPIN(%o2, %o3, 1b)
3876+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
3877+
3878 .globl atomic_add_ret
3879 .type atomic_add_ret,#function
3880 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3881 BACKOFF_SETUP(%o2)
3882 1: lduw [%o1], %g1
3883- add %g1, %o0, %g7
3884+ addcc %g1, %o0, %g7
3885+
3886+#ifdef CONFIG_PAX_REFCOUNT
3887+ tvs %icc, 6
3888+#endif
3889+
3890 cas [%o1], %g1, %g7
3891 cmp %g1, %g7
3892 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3893@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1
3894 2: BACKOFF_SPIN(%o2, %o3, 1b)
3895 .size atomic_add_ret, .-atomic_add_ret
3896
3897+ .globl atomic_add_ret_unchecked
3898+ .type atomic_add_ret_unchecked,#function
3899+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3900+ BACKOFF_SETUP(%o2)
3901+1: lduw [%o1], %g1
3902+ addcc %g1, %o0, %g7
3903+ cas [%o1], %g1, %g7
3904+ cmp %g1, %g7
3905+ bne,pn %icc, 2f
3906+ add %g7, %o0, %g7
3907+ sra %g7, 0, %o0
3908+ retl
3909+ nop
3910+2: BACKOFF_SPIN(%o2, %o3, 1b)
3911+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
3912+
3913 .globl atomic_sub_ret
3914 .type atomic_sub_ret,#function
3915 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
3916 BACKOFF_SETUP(%o2)
3917 1: lduw [%o1], %g1
3918- sub %g1, %o0, %g7
3919+ subcc %g1, %o0, %g7
3920+
3921+#ifdef CONFIG_PAX_REFCOUNT
3922+ tvs %icc, 6
3923+#endif
3924+
3925 cas [%o1], %g1, %g7
3926 cmp %g1, %g7
3927 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3928@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
3929 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
3930 BACKOFF_SETUP(%o2)
3931 1: ldx [%o1], %g1
3932- add %g1, %o0, %g7
3933+ addcc %g1, %o0, %g7
3934+
3935+#ifdef CONFIG_PAX_REFCOUNT
3936+ tvs %xcc, 6
3937+#endif
3938+
3939 casx [%o1], %g1, %g7
3940 cmp %g1, %g7
3941 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3942@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 =
3943 2: BACKOFF_SPIN(%o2, %o3, 1b)
3944 .size atomic64_add, .-atomic64_add
3945
3946+ .globl atomic64_add_unchecked
3947+ .type atomic64_add_unchecked,#function
3948+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3949+ BACKOFF_SETUP(%o2)
3950+1: ldx [%o1], %g1
3951+ addcc %g1, %o0, %g7
3952+ casx [%o1], %g1, %g7
3953+ cmp %g1, %g7
3954+ bne,pn %xcc, 2f
3955+ nop
3956+ retl
3957+ nop
3958+2: BACKOFF_SPIN(%o2, %o3, 1b)
3959+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
3960+
3961 .globl atomic64_sub
3962 .type atomic64_sub,#function
3963 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3964 BACKOFF_SETUP(%o2)
3965 1: ldx [%o1], %g1
3966- sub %g1, %o0, %g7
3967+ subcc %g1, %o0, %g7
3968+
3969+#ifdef CONFIG_PAX_REFCOUNT
3970+ tvs %xcc, 6
3971+#endif
3972+
3973 casx [%o1], %g1, %g7
3974 cmp %g1, %g7
3975 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3976@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
3977 2: BACKOFF_SPIN(%o2, %o3, 1b)
3978 .size atomic64_sub, .-atomic64_sub
3979
3980+ .globl atomic64_sub_unchecked
3981+ .type atomic64_sub_unchecked,#function
3982+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3983+ BACKOFF_SETUP(%o2)
3984+1: ldx [%o1], %g1
3985+ subcc %g1, %o0, %g7
3986+ casx [%o1], %g1, %g7
3987+ cmp %g1, %g7
3988+ bne,pn %xcc, 2f
3989+ nop
3990+ retl
3991+ nop
3992+2: BACKOFF_SPIN(%o2, %o3, 1b)
3993+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
3994+
3995 .globl atomic64_add_ret
3996 .type atomic64_add_ret,#function
3997 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3998 BACKOFF_SETUP(%o2)
3999 1: ldx [%o1], %g1
4000- add %g1, %o0, %g7
4001+ addcc %g1, %o0, %g7
4002+
4003+#ifdef CONFIG_PAX_REFCOUNT
4004+ tvs %xcc, 6
4005+#endif
4006+
4007 casx [%o1], %g1, %g7
4008 cmp %g1, %g7
4009 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4010@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4011 2: BACKOFF_SPIN(%o2, %o3, 1b)
4012 .size atomic64_add_ret, .-atomic64_add_ret
4013
4014+ .globl atomic64_add_ret_unchecked
4015+ .type atomic64_add_ret_unchecked,#function
4016+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4017+ BACKOFF_SETUP(%o2)
4018+1: ldx [%o1], %g1
4019+ addcc %g1, %o0, %g7
4020+ casx [%o1], %g1, %g7
4021+ cmp %g1, %g7
4022+ bne,pn %xcc, 2f
4023+ add %g7, %o0, %g7
4024+ mov %g7, %o0
4025+ retl
4026+ nop
4027+2: BACKOFF_SPIN(%o2, %o3, 1b)
4028+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4029+
4030 .globl atomic64_sub_ret
4031 .type atomic64_sub_ret,#function
4032 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4033 BACKOFF_SETUP(%o2)
4034 1: ldx [%o1], %g1
4035- sub %g1, %o0, %g7
4036+ subcc %g1, %o0, %g7
4037+
4038+#ifdef CONFIG_PAX_REFCOUNT
4039+ tvs %xcc, 6
4040+#endif
4041+
4042 casx [%o1], %g1, %g7
4043 cmp %g1, %g7
4044 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4045diff -urNp linux-3.0.4/arch/sparc/lib/ksyms.c linux-3.0.4/arch/sparc/lib/ksyms.c
4046--- linux-3.0.4/arch/sparc/lib/ksyms.c 2011-07-21 22:17:23.000000000 -0400
4047+++ linux-3.0.4/arch/sparc/lib/ksyms.c 2011-08-23 21:48:14.000000000 -0400
4048@@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4049
4050 /* Atomic counter implementation. */
4051 EXPORT_SYMBOL(atomic_add);
4052+EXPORT_SYMBOL(atomic_add_unchecked);
4053 EXPORT_SYMBOL(atomic_add_ret);
4054+EXPORT_SYMBOL(atomic_add_ret_unchecked);
4055 EXPORT_SYMBOL(atomic_sub);
4056+EXPORT_SYMBOL(atomic_sub_unchecked);
4057 EXPORT_SYMBOL(atomic_sub_ret);
4058 EXPORT_SYMBOL(atomic64_add);
4059+EXPORT_SYMBOL(atomic64_add_unchecked);
4060 EXPORT_SYMBOL(atomic64_add_ret);
4061+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4062 EXPORT_SYMBOL(atomic64_sub);
4063+EXPORT_SYMBOL(atomic64_sub_unchecked);
4064 EXPORT_SYMBOL(atomic64_sub_ret);
4065
4066 /* Atomic bit operations. */
4067diff -urNp linux-3.0.4/arch/sparc/lib/Makefile linux-3.0.4/arch/sparc/lib/Makefile
4068--- linux-3.0.4/arch/sparc/lib/Makefile 2011-08-23 21:44:40.000000000 -0400
4069+++ linux-3.0.4/arch/sparc/lib/Makefile 2011-08-23 21:47:55.000000000 -0400
4070@@ -2,7 +2,7 @@
4071 #
4072
4073 asflags-y := -ansi -DST_DIV0=0x02
4074-ccflags-y := -Werror
4075+#ccflags-y := -Werror
4076
4077 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4078 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4079diff -urNp linux-3.0.4/arch/sparc/Makefile linux-3.0.4/arch/sparc/Makefile
4080--- linux-3.0.4/arch/sparc/Makefile 2011-07-21 22:17:23.000000000 -0400
4081+++ linux-3.0.4/arch/sparc/Makefile 2011-08-23 21:48:14.000000000 -0400
4082@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4083 # Export what is needed by arch/sparc/boot/Makefile
4084 export VMLINUX_INIT VMLINUX_MAIN
4085 VMLINUX_INIT := $(head-y) $(init-y)
4086-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4087+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4088 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4089 VMLINUX_MAIN += $(drivers-y) $(net-y)
4090
4091diff -urNp linux-3.0.4/arch/sparc/mm/fault_32.c linux-3.0.4/arch/sparc/mm/fault_32.c
4092--- linux-3.0.4/arch/sparc/mm/fault_32.c 2011-07-21 22:17:23.000000000 -0400
4093+++ linux-3.0.4/arch/sparc/mm/fault_32.c 2011-08-23 21:47:55.000000000 -0400
4094@@ -22,6 +22,9 @@
4095 #include <linux/interrupt.h>
4096 #include <linux/module.h>
4097 #include <linux/kdebug.h>
4098+#include <linux/slab.h>
4099+#include <linux/pagemap.h>
4100+#include <linux/compiler.h>
4101
4102 #include <asm/system.h>
4103 #include <asm/page.h>
4104@@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
4105 return safe_compute_effective_address(regs, insn);
4106 }
4107
4108+#ifdef CONFIG_PAX_PAGEEXEC
4109+#ifdef CONFIG_PAX_DLRESOLVE
4110+static void pax_emuplt_close(struct vm_area_struct *vma)
4111+{
4112+ vma->vm_mm->call_dl_resolve = 0UL;
4113+}
4114+
4115+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4116+{
4117+ unsigned int *kaddr;
4118+
4119+ vmf->page = alloc_page(GFP_HIGHUSER);
4120+ if (!vmf->page)
4121+ return VM_FAULT_OOM;
4122+
4123+ kaddr = kmap(vmf->page);
4124+ memset(kaddr, 0, PAGE_SIZE);
4125+ kaddr[0] = 0x9DE3BFA8U; /* save */
4126+ flush_dcache_page(vmf->page);
4127+ kunmap(vmf->page);
4128+ return VM_FAULT_MAJOR;
4129+}
4130+
4131+static const struct vm_operations_struct pax_vm_ops = {
4132+ .close = pax_emuplt_close,
4133+ .fault = pax_emuplt_fault
4134+};
4135+
4136+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4137+{
4138+ int ret;
4139+
4140+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4141+ vma->vm_mm = current->mm;
4142+ vma->vm_start = addr;
4143+ vma->vm_end = addr + PAGE_SIZE;
4144+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4145+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4146+ vma->vm_ops = &pax_vm_ops;
4147+
4148+ ret = insert_vm_struct(current->mm, vma);
4149+ if (ret)
4150+ return ret;
4151+
4152+ ++current->mm->total_vm;
4153+ return 0;
4154+}
4155+#endif
4156+
4157+/*
4158+ * PaX: decide what to do with offenders (regs->pc = fault address)
4159+ *
4160+ * returns 1 when task should be killed
4161+ * 2 when patched PLT trampoline was detected
4162+ * 3 when unpatched PLT trampoline was detected
4163+ */
4164+static int pax_handle_fetch_fault(struct pt_regs *regs)
4165+{
4166+
4167+#ifdef CONFIG_PAX_EMUPLT
4168+ int err;
4169+
4170+ do { /* PaX: patched PLT emulation #1 */
4171+ unsigned int sethi1, sethi2, jmpl;
4172+
4173+ err = get_user(sethi1, (unsigned int *)regs->pc);
4174+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4175+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4176+
4177+ if (err)
4178+ break;
4179+
4180+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4181+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4182+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4183+ {
4184+ unsigned int addr;
4185+
4186+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4187+ addr = regs->u_regs[UREG_G1];
4188+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4189+ regs->pc = addr;
4190+ regs->npc = addr+4;
4191+ return 2;
4192+ }
4193+ } while (0);
4194+
4195+ { /* PaX: patched PLT emulation #2 */
4196+ unsigned int ba;
4197+
4198+ err = get_user(ba, (unsigned int *)regs->pc);
4199+
4200+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4201+ unsigned int addr;
4202+
4203+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4204+ regs->pc = addr;
4205+ regs->npc = addr+4;
4206+ return 2;
4207+ }
4208+ }
4209+
4210+ do { /* PaX: patched PLT emulation #3 */
4211+ unsigned int sethi, jmpl, nop;
4212+
4213+ err = get_user(sethi, (unsigned int *)regs->pc);
4214+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4215+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4216+
4217+ if (err)
4218+ break;
4219+
4220+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4221+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4222+ nop == 0x01000000U)
4223+ {
4224+ unsigned int addr;
4225+
4226+ addr = (sethi & 0x003FFFFFU) << 10;
4227+ regs->u_regs[UREG_G1] = addr;
4228+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4229+ regs->pc = addr;
4230+ regs->npc = addr+4;
4231+ return 2;
4232+ }
4233+ } while (0);
4234+
4235+ do { /* PaX: unpatched PLT emulation step 1 */
4236+ unsigned int sethi, ba, nop;
4237+
4238+ err = get_user(sethi, (unsigned int *)regs->pc);
4239+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
4240+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4241+
4242+ if (err)
4243+ break;
4244+
4245+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4246+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4247+ nop == 0x01000000U)
4248+ {
4249+ unsigned int addr, save, call;
4250+
4251+ if ((ba & 0xFFC00000U) == 0x30800000U)
4252+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4253+ else
4254+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4255+
4256+ err = get_user(save, (unsigned int *)addr);
4257+ err |= get_user(call, (unsigned int *)(addr+4));
4258+ err |= get_user(nop, (unsigned int *)(addr+8));
4259+ if (err)
4260+ break;
4261+
4262+#ifdef CONFIG_PAX_DLRESOLVE
4263+ if (save == 0x9DE3BFA8U &&
4264+ (call & 0xC0000000U) == 0x40000000U &&
4265+ nop == 0x01000000U)
4266+ {
4267+ struct vm_area_struct *vma;
4268+ unsigned long call_dl_resolve;
4269+
4270+ down_read(&current->mm->mmap_sem);
4271+ call_dl_resolve = current->mm->call_dl_resolve;
4272+ up_read(&current->mm->mmap_sem);
4273+ if (likely(call_dl_resolve))
4274+ goto emulate;
4275+
4276+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4277+
4278+ down_write(&current->mm->mmap_sem);
4279+ if (current->mm->call_dl_resolve) {
4280+ call_dl_resolve = current->mm->call_dl_resolve;
4281+ up_write(&current->mm->mmap_sem);
4282+ if (vma)
4283+ kmem_cache_free(vm_area_cachep, vma);
4284+ goto emulate;
4285+ }
4286+
4287+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4288+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4289+ up_write(&current->mm->mmap_sem);
4290+ if (vma)
4291+ kmem_cache_free(vm_area_cachep, vma);
4292+ return 1;
4293+ }
4294+
4295+ if (pax_insert_vma(vma, call_dl_resolve)) {
4296+ up_write(&current->mm->mmap_sem);
4297+ kmem_cache_free(vm_area_cachep, vma);
4298+ return 1;
4299+ }
4300+
4301+ current->mm->call_dl_resolve = call_dl_resolve;
4302+ up_write(&current->mm->mmap_sem);
4303+
4304+emulate:
4305+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4306+ regs->pc = call_dl_resolve;
4307+ regs->npc = addr+4;
4308+ return 3;
4309+ }
4310+#endif
4311+
4312+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4313+ if ((save & 0xFFC00000U) == 0x05000000U &&
4314+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4315+ nop == 0x01000000U)
4316+ {
4317+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4318+ regs->u_regs[UREG_G2] = addr + 4;
4319+ addr = (save & 0x003FFFFFU) << 10;
4320+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4321+ regs->pc = addr;
4322+ regs->npc = addr+4;
4323+ return 3;
4324+ }
4325+ }
4326+ } while (0);
4327+
4328+ do { /* PaX: unpatched PLT emulation step 2 */
4329+ unsigned int save, call, nop;
4330+
4331+ err = get_user(save, (unsigned int *)(regs->pc-4));
4332+ err |= get_user(call, (unsigned int *)regs->pc);
4333+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
4334+ if (err)
4335+ break;
4336+
4337+ if (save == 0x9DE3BFA8U &&
4338+ (call & 0xC0000000U) == 0x40000000U &&
4339+ nop == 0x01000000U)
4340+ {
4341+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4342+
4343+ regs->u_regs[UREG_RETPC] = regs->pc;
4344+ regs->pc = dl_resolve;
4345+ regs->npc = dl_resolve+4;
4346+ return 3;
4347+ }
4348+ } while (0);
4349+#endif
4350+
4351+ return 1;
4352+}
4353+
4354+void pax_report_insns(void *pc, void *sp)
4355+{
4356+ unsigned long i;
4357+
4358+ printk(KERN_ERR "PAX: bytes at PC: ");
4359+ for (i = 0; i < 8; i++) {
4360+ unsigned int c;
4361+ if (get_user(c, (unsigned int *)pc+i))
4362+ printk(KERN_CONT "???????? ");
4363+ else
4364+ printk(KERN_CONT "%08x ", c);
4365+ }
4366+ printk("\n");
4367+}
4368+#endif
4369+
4370 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4371 int text_fault)
4372 {
4373@@ -281,6 +546,24 @@ good_area:
4374 if(!(vma->vm_flags & VM_WRITE))
4375 goto bad_area;
4376 } else {
4377+
4378+#ifdef CONFIG_PAX_PAGEEXEC
4379+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4380+ up_read(&mm->mmap_sem);
4381+ switch (pax_handle_fetch_fault(regs)) {
4382+
4383+#ifdef CONFIG_PAX_EMUPLT
4384+ case 2:
4385+ case 3:
4386+ return;
4387+#endif
4388+
4389+ }
4390+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4391+ do_group_exit(SIGKILL);
4392+ }
4393+#endif
4394+
4395 /* Allow reads even for write-only mappings */
4396 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4397 goto bad_area;
4398diff -urNp linux-3.0.4/arch/sparc/mm/fault_64.c linux-3.0.4/arch/sparc/mm/fault_64.c
4399--- linux-3.0.4/arch/sparc/mm/fault_64.c 2011-07-21 22:17:23.000000000 -0400
4400+++ linux-3.0.4/arch/sparc/mm/fault_64.c 2011-08-23 21:48:14.000000000 -0400
4401@@ -21,6 +21,9 @@
4402 #include <linux/kprobes.h>
4403 #include <linux/kdebug.h>
4404 #include <linux/percpu.h>
4405+#include <linux/slab.h>
4406+#include <linux/pagemap.h>
4407+#include <linux/compiler.h>
4408
4409 #include <asm/page.h>
4410 #include <asm/pgtable.h>
4411@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(stru
4412 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4413 regs->tpc);
4414 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4415- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4416+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4417 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4418 dump_stack();
4419 unhandled_fault(regs->tpc, current, regs);
4420@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
4421 show_regs(regs);
4422 }
4423
4424+#ifdef CONFIG_PAX_PAGEEXEC
4425+#ifdef CONFIG_PAX_DLRESOLVE
4426+static void pax_emuplt_close(struct vm_area_struct *vma)
4427+{
4428+ vma->vm_mm->call_dl_resolve = 0UL;
4429+}
4430+
4431+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4432+{
4433+ unsigned int *kaddr;
4434+
4435+ vmf->page = alloc_page(GFP_HIGHUSER);
4436+ if (!vmf->page)
4437+ return VM_FAULT_OOM;
4438+
4439+ kaddr = kmap(vmf->page);
4440+ memset(kaddr, 0, PAGE_SIZE);
4441+ kaddr[0] = 0x9DE3BFA8U; /* save */
4442+ flush_dcache_page(vmf->page);
4443+ kunmap(vmf->page);
4444+ return VM_FAULT_MAJOR;
4445+}
4446+
4447+static const struct vm_operations_struct pax_vm_ops = {
4448+ .close = pax_emuplt_close,
4449+ .fault = pax_emuplt_fault
4450+};
4451+
4452+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4453+{
4454+ int ret;
4455+
4456+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4457+ vma->vm_mm = current->mm;
4458+ vma->vm_start = addr;
4459+ vma->vm_end = addr + PAGE_SIZE;
4460+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4461+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4462+ vma->vm_ops = &pax_vm_ops;
4463+
4464+ ret = insert_vm_struct(current->mm, vma);
4465+ if (ret)
4466+ return ret;
4467+
4468+ ++current->mm->total_vm;
4469+ return 0;
4470+}
4471+#endif
4472+
4473+/*
4474+ * PaX: decide what to do with offenders (regs->tpc = fault address)
4475+ *
4476+ * returns 1 when task should be killed
4477+ * 2 when patched PLT trampoline was detected
4478+ * 3 when unpatched PLT trampoline was detected
4479+ */
4480+static int pax_handle_fetch_fault(struct pt_regs *regs)
4481+{
4482+
4483+#ifdef CONFIG_PAX_EMUPLT
4484+ int err;
4485+
4486+ do { /* PaX: patched PLT emulation #1 */
4487+ unsigned int sethi1, sethi2, jmpl;
4488+
4489+ err = get_user(sethi1, (unsigned int *)regs->tpc);
4490+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4491+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4492+
4493+ if (err)
4494+ break;
4495+
4496+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4497+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4498+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4499+ {
4500+ unsigned long addr;
4501+
4502+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4503+ addr = regs->u_regs[UREG_G1];
4504+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4505+
4506+ if (test_thread_flag(TIF_32BIT))
4507+ addr &= 0xFFFFFFFFUL;
4508+
4509+ regs->tpc = addr;
4510+ regs->tnpc = addr+4;
4511+ return 2;
4512+ }
4513+ } while (0);
4514+
4515+ { /* PaX: patched PLT emulation #2 */
4516+ unsigned int ba;
4517+
4518+ err = get_user(ba, (unsigned int *)regs->tpc);
4519+
4520+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4521+ unsigned long addr;
4522+
4523+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4524+
4525+ if (test_thread_flag(TIF_32BIT))
4526+ addr &= 0xFFFFFFFFUL;
4527+
4528+ regs->tpc = addr;
4529+ regs->tnpc = addr+4;
4530+ return 2;
4531+ }
4532+ }
4533+
4534+ do { /* PaX: patched PLT emulation #3 */
4535+ unsigned int sethi, jmpl, nop;
4536+
4537+ err = get_user(sethi, (unsigned int *)regs->tpc);
4538+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
4539+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4540+
4541+ if (err)
4542+ break;
4543+
4544+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4545+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4546+ nop == 0x01000000U)
4547+ {
4548+ unsigned long addr;
4549+
4550+ addr = (sethi & 0x003FFFFFU) << 10;
4551+ regs->u_regs[UREG_G1] = addr;
4552+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4553+
4554+ if (test_thread_flag(TIF_32BIT))
4555+ addr &= 0xFFFFFFFFUL;
4556+
4557+ regs->tpc = addr;
4558+ regs->tnpc = addr+4;
4559+ return 2;
4560+ }
4561+ } while (0);
4562+
4563+ do { /* PaX: patched PLT emulation #4 */
4564+ unsigned int sethi, mov1, call, mov2;
4565+
4566+ err = get_user(sethi, (unsigned int *)regs->tpc);
4567+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
4568+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
4569+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
4570+
4571+ if (err)
4572+ break;
4573+
4574+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4575+ mov1 == 0x8210000FU &&
4576+ (call & 0xC0000000U) == 0x40000000U &&
4577+ mov2 == 0x9E100001U)
4578+ {
4579+ unsigned long addr;
4580+
4581+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
4582+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4583+
4584+ if (test_thread_flag(TIF_32BIT))
4585+ addr &= 0xFFFFFFFFUL;
4586+
4587+ regs->tpc = addr;
4588+ regs->tnpc = addr+4;
4589+ return 2;
4590+ }
4591+ } while (0);
4592+
4593+ do { /* PaX: patched PLT emulation #5 */
4594+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
4595+
4596+ err = get_user(sethi, (unsigned int *)regs->tpc);
4597+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4598+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4599+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
4600+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
4601+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
4602+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
4603+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
4604+
4605+ if (err)
4606+ break;
4607+
4608+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4609+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
4610+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4611+ (or1 & 0xFFFFE000U) == 0x82106000U &&
4612+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
4613+ sllx == 0x83287020U &&
4614+ jmpl == 0x81C04005U &&
4615+ nop == 0x01000000U)
4616+ {
4617+ unsigned long addr;
4618+
4619+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4620+ regs->u_regs[UREG_G1] <<= 32;
4621+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4622+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4623+ regs->tpc = addr;
4624+ regs->tnpc = addr+4;
4625+ return 2;
4626+ }
4627+ } while (0);
4628+
4629+ do { /* PaX: patched PLT emulation #6 */
4630+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
4631+
4632+ err = get_user(sethi, (unsigned int *)regs->tpc);
4633+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4634+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4635+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
4636+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
4637+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
4638+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
4639+
4640+ if (err)
4641+ break;
4642+
4643+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4644+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
4645+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4646+ sllx == 0x83287020U &&
4647+ (or & 0xFFFFE000U) == 0x8A116000U &&
4648+ jmpl == 0x81C04005U &&
4649+ nop == 0x01000000U)
4650+ {
4651+ unsigned long addr;
4652+
4653+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
4654+ regs->u_regs[UREG_G1] <<= 32;
4655+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
4656+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4657+ regs->tpc = addr;
4658+ regs->tnpc = addr+4;
4659+ return 2;
4660+ }
4661+ } while (0);
4662+
4663+ do { /* PaX: unpatched PLT emulation step 1 */
4664+ unsigned int sethi, ba, nop;
4665+
4666+ err = get_user(sethi, (unsigned int *)regs->tpc);
4667+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4668+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4669+
4670+ if (err)
4671+ break;
4672+
4673+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4674+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4675+ nop == 0x01000000U)
4676+ {
4677+ unsigned long addr;
4678+ unsigned int save, call;
4679+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
4680+
4681+ if ((ba & 0xFFC00000U) == 0x30800000U)
4682+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4683+ else
4684+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4685+
4686+ if (test_thread_flag(TIF_32BIT))
4687+ addr &= 0xFFFFFFFFUL;
4688+
4689+ err = get_user(save, (unsigned int *)addr);
4690+ err |= get_user(call, (unsigned int *)(addr+4));
4691+ err |= get_user(nop, (unsigned int *)(addr+8));
4692+ if (err)
4693+ break;
4694+
4695+#ifdef CONFIG_PAX_DLRESOLVE
4696+ if (save == 0x9DE3BFA8U &&
4697+ (call & 0xC0000000U) == 0x40000000U &&
4698+ nop == 0x01000000U)
4699+ {
4700+ struct vm_area_struct *vma;
4701+ unsigned long call_dl_resolve;
4702+
4703+ down_read(&current->mm->mmap_sem);
4704+ call_dl_resolve = current->mm->call_dl_resolve;
4705+ up_read(&current->mm->mmap_sem);
4706+ if (likely(call_dl_resolve))
4707+ goto emulate;
4708+
4709+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4710+
4711+ down_write(&current->mm->mmap_sem);
4712+ if (current->mm->call_dl_resolve) {
4713+ call_dl_resolve = current->mm->call_dl_resolve;
4714+ up_write(&current->mm->mmap_sem);
4715+ if (vma)
4716+ kmem_cache_free(vm_area_cachep, vma);
4717+ goto emulate;
4718+ }
4719+
4720+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4721+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4722+ up_write(&current->mm->mmap_sem);
4723+ if (vma)
4724+ kmem_cache_free(vm_area_cachep, vma);
4725+ return 1;
4726+ }
4727+
4728+ if (pax_insert_vma(vma, call_dl_resolve)) {
4729+ up_write(&current->mm->mmap_sem);
4730+ kmem_cache_free(vm_area_cachep, vma);
4731+ return 1;
4732+ }
4733+
4734+ current->mm->call_dl_resolve = call_dl_resolve;
4735+ up_write(&current->mm->mmap_sem);
4736+
4737+emulate:
4738+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4739+ regs->tpc = call_dl_resolve;
4740+ regs->tnpc = addr+4;
4741+ return 3;
4742+ }
4743+#endif
4744+
4745+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4746+ if ((save & 0xFFC00000U) == 0x05000000U &&
4747+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4748+ nop == 0x01000000U)
4749+ {
4750+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4751+ regs->u_regs[UREG_G2] = addr + 4;
4752+ addr = (save & 0x003FFFFFU) << 10;
4753+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4754+
4755+ if (test_thread_flag(TIF_32BIT))
4756+ addr &= 0xFFFFFFFFUL;
4757+
4758+ regs->tpc = addr;
4759+ regs->tnpc = addr+4;
4760+ return 3;
4761+ }
4762+
4763+ /* PaX: 64-bit PLT stub */
4764+ err = get_user(sethi1, (unsigned int *)addr);
4765+ err |= get_user(sethi2, (unsigned int *)(addr+4));
4766+ err |= get_user(or1, (unsigned int *)(addr+8));
4767+ err |= get_user(or2, (unsigned int *)(addr+12));
4768+ err |= get_user(sllx, (unsigned int *)(addr+16));
4769+ err |= get_user(add, (unsigned int *)(addr+20));
4770+ err |= get_user(jmpl, (unsigned int *)(addr+24));
4771+ err |= get_user(nop, (unsigned int *)(addr+28));
4772+ if (err)
4773+ break;
4774+
4775+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
4776+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4777+ (or1 & 0xFFFFE000U) == 0x88112000U &&
4778+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
4779+ sllx == 0x89293020U &&
4780+ add == 0x8A010005U &&
4781+ jmpl == 0x89C14000U &&
4782+ nop == 0x01000000U)
4783+ {
4784+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4785+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4786+ regs->u_regs[UREG_G4] <<= 32;
4787+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4788+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
4789+ regs->u_regs[UREG_G4] = addr + 24;
4790+ addr = regs->u_regs[UREG_G5];
4791+ regs->tpc = addr;
4792+ regs->tnpc = addr+4;
4793+ return 3;
4794+ }
4795+ }
4796+ } while (0);
4797+
4798+#ifdef CONFIG_PAX_DLRESOLVE
4799+ do { /* PaX: unpatched PLT emulation step 2 */
4800+ unsigned int save, call, nop;
4801+
4802+ err = get_user(save, (unsigned int *)(regs->tpc-4));
4803+ err |= get_user(call, (unsigned int *)regs->tpc);
4804+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
4805+ if (err)
4806+ break;
4807+
4808+ if (save == 0x9DE3BFA8U &&
4809+ (call & 0xC0000000U) == 0x40000000U &&
4810+ nop == 0x01000000U)
4811+ {
4812+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4813+
4814+ if (test_thread_flag(TIF_32BIT))
4815+ dl_resolve &= 0xFFFFFFFFUL;
4816+
4817+ regs->u_regs[UREG_RETPC] = regs->tpc;
4818+ regs->tpc = dl_resolve;
4819+ regs->tnpc = dl_resolve+4;
4820+ return 3;
4821+ }
4822+ } while (0);
4823+#endif
4824+
4825+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
4826+ unsigned int sethi, ba, nop;
4827+
4828+ err = get_user(sethi, (unsigned int *)regs->tpc);
4829+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4830+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4831+
4832+ if (err)
4833+ break;
4834+
4835+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4836+ (ba & 0xFFF00000U) == 0x30600000U &&
4837+ nop == 0x01000000U)
4838+ {
4839+ unsigned long addr;
4840+
4841+ addr = (sethi & 0x003FFFFFU) << 10;
4842+ regs->u_regs[UREG_G1] = addr;
4843+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4844+
4845+ if (test_thread_flag(TIF_32BIT))
4846+ addr &= 0xFFFFFFFFUL;
4847+
4848+ regs->tpc = addr;
4849+ regs->tnpc = addr+4;
4850+ return 2;
4851+ }
4852+ } while (0);
4853+
4854+#endif
4855+
4856+ return 1;
4857+}
4858+
4859+void pax_report_insns(void *pc, void *sp)
4860+{
4861+ unsigned long i;
4862+
4863+ printk(KERN_ERR "PAX: bytes at PC: ");
4864+ for (i = 0; i < 8; i++) {
4865+ unsigned int c;
4866+ if (get_user(c, (unsigned int *)pc+i))
4867+ printk(KERN_CONT "???????? ");
4868+ else
4869+ printk(KERN_CONT "%08x ", c);
4870+ }
4871+ printk("\n");
4872+}
4873+#endif
4874+
4875 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
4876 {
4877 struct mm_struct *mm = current->mm;
4878@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
4879 if (!vma)
4880 goto bad_area;
4881
4882+#ifdef CONFIG_PAX_PAGEEXEC
4883+ /* PaX: detect ITLB misses on non-exec pages */
4884+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
4885+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
4886+ {
4887+ if (address != regs->tpc)
4888+ goto good_area;
4889+
4890+ up_read(&mm->mmap_sem);
4891+ switch (pax_handle_fetch_fault(regs)) {
4892+
4893+#ifdef CONFIG_PAX_EMUPLT
4894+ case 2:
4895+ case 3:
4896+ return;
4897+#endif
4898+
4899+ }
4900+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
4901+ do_group_exit(SIGKILL);
4902+ }
4903+#endif
4904+
4905 /* Pure DTLB misses do not tell us whether the fault causing
4906 * load/store/atomic was a write or not, it only says that there
4907 * was no match. So in such a case we (carefully) read the
4908diff -urNp linux-3.0.4/arch/sparc/mm/hugetlbpage.c linux-3.0.4/arch/sparc/mm/hugetlbpage.c
4909--- linux-3.0.4/arch/sparc/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
4910+++ linux-3.0.4/arch/sparc/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
4911@@ -68,7 +68,7 @@ full_search:
4912 }
4913 return -ENOMEM;
4914 }
4915- if (likely(!vma || addr + len <= vma->vm_start)) {
4916+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4917 /*
4918 * Remember the place where we stopped the search:
4919 */
4920@@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
4921 /* make sure it can fit in the remaining address space */
4922 if (likely(addr > len)) {
4923 vma = find_vma(mm, addr-len);
4924- if (!vma || addr <= vma->vm_start) {
4925+ if (check_heap_stack_gap(vma, addr - len, len)) {
4926 /* remember the address as a hint for next time */
4927 return (mm->free_area_cache = addr-len);
4928 }
4929@@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct
4930 if (unlikely(mm->mmap_base < len))
4931 goto bottomup;
4932
4933- addr = (mm->mmap_base-len) & HPAGE_MASK;
4934+ addr = mm->mmap_base - len;
4935
4936 do {
4937+ addr &= HPAGE_MASK;
4938 /*
4939 * Lookup failure means no vma is above this address,
4940 * else if new region fits below vma->vm_start,
4941 * return with success:
4942 */
4943 vma = find_vma(mm, addr);
4944- if (likely(!vma || addr+len <= vma->vm_start)) {
4945+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4946 /* remember the address as a hint for next time */
4947 return (mm->free_area_cache = addr);
4948 }
4949@@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct
4950 mm->cached_hole_size = vma->vm_start - addr;
4951
4952 /* try just below the current vma->vm_start */
4953- addr = (vma->vm_start-len) & HPAGE_MASK;
4954- } while (likely(len < vma->vm_start));
4955+ addr = skip_heap_stack_gap(vma, len);
4956+ } while (!IS_ERR_VALUE(addr));
4957
4958 bottomup:
4959 /*
4960@@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f
4961 if (addr) {
4962 addr = ALIGN(addr, HPAGE_SIZE);
4963 vma = find_vma(mm, addr);
4964- if (task_size - len >= addr &&
4965- (!vma || addr + len <= vma->vm_start))
4966+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4967 return addr;
4968 }
4969 if (mm->get_unmapped_area == arch_get_unmapped_area)
4970diff -urNp linux-3.0.4/arch/sparc/mm/init_32.c linux-3.0.4/arch/sparc/mm/init_32.c
4971--- linux-3.0.4/arch/sparc/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
4972+++ linux-3.0.4/arch/sparc/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
4973@@ -316,6 +316,9 @@ extern void device_scan(void);
4974 pgprot_t PAGE_SHARED __read_mostly;
4975 EXPORT_SYMBOL(PAGE_SHARED);
4976
4977+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
4978+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
4979+
4980 void __init paging_init(void)
4981 {
4982 switch(sparc_cpu_model) {
4983@@ -344,17 +347,17 @@ void __init paging_init(void)
4984
4985 /* Initialize the protection map with non-constant, MMU dependent values. */
4986 protection_map[0] = PAGE_NONE;
4987- protection_map[1] = PAGE_READONLY;
4988- protection_map[2] = PAGE_COPY;
4989- protection_map[3] = PAGE_COPY;
4990+ protection_map[1] = PAGE_READONLY_NOEXEC;
4991+ protection_map[2] = PAGE_COPY_NOEXEC;
4992+ protection_map[3] = PAGE_COPY_NOEXEC;
4993 protection_map[4] = PAGE_READONLY;
4994 protection_map[5] = PAGE_READONLY;
4995 protection_map[6] = PAGE_COPY;
4996 protection_map[7] = PAGE_COPY;
4997 protection_map[8] = PAGE_NONE;
4998- protection_map[9] = PAGE_READONLY;
4999- protection_map[10] = PAGE_SHARED;
5000- protection_map[11] = PAGE_SHARED;
5001+ protection_map[9] = PAGE_READONLY_NOEXEC;
5002+ protection_map[10] = PAGE_SHARED_NOEXEC;
5003+ protection_map[11] = PAGE_SHARED_NOEXEC;
5004 protection_map[12] = PAGE_READONLY;
5005 protection_map[13] = PAGE_READONLY;
5006 protection_map[14] = PAGE_SHARED;
5007diff -urNp linux-3.0.4/arch/sparc/mm/Makefile linux-3.0.4/arch/sparc/mm/Makefile
5008--- linux-3.0.4/arch/sparc/mm/Makefile 2011-07-21 22:17:23.000000000 -0400
5009+++ linux-3.0.4/arch/sparc/mm/Makefile 2011-08-23 21:47:55.000000000 -0400
5010@@ -2,7 +2,7 @@
5011 #
5012
5013 asflags-y := -ansi
5014-ccflags-y := -Werror
5015+#ccflags-y := -Werror
5016
5017 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5018 obj-y += fault_$(BITS).o
5019diff -urNp linux-3.0.4/arch/sparc/mm/srmmu.c linux-3.0.4/arch/sparc/mm/srmmu.c
5020--- linux-3.0.4/arch/sparc/mm/srmmu.c 2011-07-21 22:17:23.000000000 -0400
5021+++ linux-3.0.4/arch/sparc/mm/srmmu.c 2011-08-23 21:47:55.000000000 -0400
5022@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5023 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5024 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5025 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5026+
5027+#ifdef CONFIG_PAX_PAGEEXEC
5028+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5029+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5030+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5031+#endif
5032+
5033 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5034 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5035
5036diff -urNp linux-3.0.4/arch/um/include/asm/kmap_types.h linux-3.0.4/arch/um/include/asm/kmap_types.h
5037--- linux-3.0.4/arch/um/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
5038+++ linux-3.0.4/arch/um/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
5039@@ -23,6 +23,7 @@ enum km_type {
5040 KM_IRQ1,
5041 KM_SOFTIRQ0,
5042 KM_SOFTIRQ1,
5043+ KM_CLEARPAGE,
5044 KM_TYPE_NR
5045 };
5046
5047diff -urNp linux-3.0.4/arch/um/include/asm/page.h linux-3.0.4/arch/um/include/asm/page.h
5048--- linux-3.0.4/arch/um/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
5049+++ linux-3.0.4/arch/um/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
5050@@ -14,6 +14,9 @@
5051 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5052 #define PAGE_MASK (~(PAGE_SIZE-1))
5053
5054+#define ktla_ktva(addr) (addr)
5055+#define ktva_ktla(addr) (addr)
5056+
5057 #ifndef __ASSEMBLY__
5058
5059 struct page;
5060diff -urNp linux-3.0.4/arch/um/kernel/process.c linux-3.0.4/arch/um/kernel/process.c
5061--- linux-3.0.4/arch/um/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
5062+++ linux-3.0.4/arch/um/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
5063@@ -404,22 +404,6 @@ int singlestepping(void * t)
5064 return 2;
5065 }
5066
5067-/*
5068- * Only x86 and x86_64 have an arch_align_stack().
5069- * All other arches have "#define arch_align_stack(x) (x)"
5070- * in their asm/system.h
5071- * As this is included in UML from asm-um/system-generic.h,
5072- * we can use it to behave as the subarch does.
5073- */
5074-#ifndef arch_align_stack
5075-unsigned long arch_align_stack(unsigned long sp)
5076-{
5077- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5078- sp -= get_random_int() % 8192;
5079- return sp & ~0xf;
5080-}
5081-#endif
5082-
5083 unsigned long get_wchan(struct task_struct *p)
5084 {
5085 unsigned long stack_page, sp, ip;
5086diff -urNp linux-3.0.4/arch/um/sys-i386/syscalls.c linux-3.0.4/arch/um/sys-i386/syscalls.c
5087--- linux-3.0.4/arch/um/sys-i386/syscalls.c 2011-07-21 22:17:23.000000000 -0400
5088+++ linux-3.0.4/arch/um/sys-i386/syscalls.c 2011-08-23 21:47:55.000000000 -0400
5089@@ -11,6 +11,21 @@
5090 #include "asm/uaccess.h"
5091 #include "asm/unistd.h"
5092
5093+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5094+{
5095+ unsigned long pax_task_size = TASK_SIZE;
5096+
5097+#ifdef CONFIG_PAX_SEGMEXEC
5098+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5099+ pax_task_size = SEGMEXEC_TASK_SIZE;
5100+#endif
5101+
5102+ if (len > pax_task_size || addr > pax_task_size - len)
5103+ return -EINVAL;
5104+
5105+ return 0;
5106+}
5107+
5108 /*
5109 * The prototype on i386 is:
5110 *
5111diff -urNp linux-3.0.4/arch/x86/boot/bitops.h linux-3.0.4/arch/x86/boot/bitops.h
5112--- linux-3.0.4/arch/x86/boot/bitops.h 2011-07-21 22:17:23.000000000 -0400
5113+++ linux-3.0.4/arch/x86/boot/bitops.h 2011-08-23 21:47:55.000000000 -0400
5114@@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5115 u8 v;
5116 const u32 *p = (const u32 *)addr;
5117
5118- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5119+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5120 return v;
5121 }
5122
5123@@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5124
5125 static inline void set_bit(int nr, void *addr)
5126 {
5127- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5128+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5129 }
5130
5131 #endif /* BOOT_BITOPS_H */
5132diff -urNp linux-3.0.4/arch/x86/boot/boot.h linux-3.0.4/arch/x86/boot/boot.h
5133--- linux-3.0.4/arch/x86/boot/boot.h 2011-07-21 22:17:23.000000000 -0400
5134+++ linux-3.0.4/arch/x86/boot/boot.h 2011-08-23 21:47:55.000000000 -0400
5135@@ -85,7 +85,7 @@ static inline void io_delay(void)
5136 static inline u16 ds(void)
5137 {
5138 u16 seg;
5139- asm("movw %%ds,%0" : "=rm" (seg));
5140+ asm volatile("movw %%ds,%0" : "=rm" (seg));
5141 return seg;
5142 }
5143
5144@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t
5145 static inline int memcmp(const void *s1, const void *s2, size_t len)
5146 {
5147 u8 diff;
5148- asm("repe; cmpsb; setnz %0"
5149+ asm volatile("repe; cmpsb; setnz %0"
5150 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5151 return diff;
5152 }
5153diff -urNp linux-3.0.4/arch/x86/boot/compressed/head_32.S linux-3.0.4/arch/x86/boot/compressed/head_32.S
5154--- linux-3.0.4/arch/x86/boot/compressed/head_32.S 2011-07-21 22:17:23.000000000 -0400
5155+++ linux-3.0.4/arch/x86/boot/compressed/head_32.S 2011-08-23 21:47:55.000000000 -0400
5156@@ -76,7 +76,7 @@ ENTRY(startup_32)
5157 notl %eax
5158 andl %eax, %ebx
5159 #else
5160- movl $LOAD_PHYSICAL_ADDR, %ebx
5161+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5162 #endif
5163
5164 /* Target address to relocate to for decompression */
5165@@ -162,7 +162,7 @@ relocated:
5166 * and where it was actually loaded.
5167 */
5168 movl %ebp, %ebx
5169- subl $LOAD_PHYSICAL_ADDR, %ebx
5170+ subl $____LOAD_PHYSICAL_ADDR, %ebx
5171 jz 2f /* Nothing to be done if loaded at compiled addr. */
5172 /*
5173 * Process relocations.
5174@@ -170,8 +170,7 @@ relocated:
5175
5176 1: subl $4, %edi
5177 movl (%edi), %ecx
5178- testl %ecx, %ecx
5179- jz 2f
5180+ jecxz 2f
5181 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5182 jmp 1b
5183 2:
5184diff -urNp linux-3.0.4/arch/x86/boot/compressed/head_64.S linux-3.0.4/arch/x86/boot/compressed/head_64.S
5185--- linux-3.0.4/arch/x86/boot/compressed/head_64.S 2011-07-21 22:17:23.000000000 -0400
5186+++ linux-3.0.4/arch/x86/boot/compressed/head_64.S 2011-08-23 21:47:55.000000000 -0400
5187@@ -91,7 +91,7 @@ ENTRY(startup_32)
5188 notl %eax
5189 andl %eax, %ebx
5190 #else
5191- movl $LOAD_PHYSICAL_ADDR, %ebx
5192+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5193 #endif
5194
5195 /* Target address to relocate to for decompression */
5196@@ -233,7 +233,7 @@ ENTRY(startup_64)
5197 notq %rax
5198 andq %rax, %rbp
5199 #else
5200- movq $LOAD_PHYSICAL_ADDR, %rbp
5201+ movq $____LOAD_PHYSICAL_ADDR, %rbp
5202 #endif
5203
5204 /* Target address to relocate to for decompression */
5205diff -urNp linux-3.0.4/arch/x86/boot/compressed/Makefile linux-3.0.4/arch/x86/boot/compressed/Makefile
5206--- linux-3.0.4/arch/x86/boot/compressed/Makefile 2011-07-21 22:17:23.000000000 -0400
5207+++ linux-3.0.4/arch/x86/boot/compressed/Makefile 2011-08-23 21:47:55.000000000 -0400
5208@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
5209 KBUILD_CFLAGS += $(cflags-y)
5210 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5211 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5212+ifdef CONSTIFY_PLUGIN
5213+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5214+endif
5215
5216 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5217 GCOV_PROFILE := n
5218diff -urNp linux-3.0.4/arch/x86/boot/compressed/misc.c linux-3.0.4/arch/x86/boot/compressed/misc.c
5219--- linux-3.0.4/arch/x86/boot/compressed/misc.c 2011-07-21 22:17:23.000000000 -0400
5220+++ linux-3.0.4/arch/x86/boot/compressed/misc.c 2011-08-23 21:47:55.000000000 -0400
5221@@ -310,7 +310,7 @@ static void parse_elf(void *output)
5222 case PT_LOAD:
5223 #ifdef CONFIG_RELOCATABLE
5224 dest = output;
5225- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5226+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5227 #else
5228 dest = (void *)(phdr->p_paddr);
5229 #endif
5230@@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *
5231 error("Destination address too large");
5232 #endif
5233 #ifndef CONFIG_RELOCATABLE
5234- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5235+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5236 error("Wrong destination address");
5237 #endif
5238
5239diff -urNp linux-3.0.4/arch/x86/boot/compressed/relocs.c linux-3.0.4/arch/x86/boot/compressed/relocs.c
5240--- linux-3.0.4/arch/x86/boot/compressed/relocs.c 2011-07-21 22:17:23.000000000 -0400
5241+++ linux-3.0.4/arch/x86/boot/compressed/relocs.c 2011-08-23 21:47:55.000000000 -0400
5242@@ -13,8 +13,11 @@
5243
5244 static void die(char *fmt, ...);
5245
5246+#include "../../../../include/generated/autoconf.h"
5247+
5248 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5249 static Elf32_Ehdr ehdr;
5250+static Elf32_Phdr *phdr;
5251 static unsigned long reloc_count, reloc_idx;
5252 static unsigned long *relocs;
5253
5254@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5255 }
5256 }
5257
5258+static void read_phdrs(FILE *fp)
5259+{
5260+ unsigned int i;
5261+
5262+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5263+ if (!phdr) {
5264+ die("Unable to allocate %d program headers\n",
5265+ ehdr.e_phnum);
5266+ }
5267+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5268+ die("Seek to %d failed: %s\n",
5269+ ehdr.e_phoff, strerror(errno));
5270+ }
5271+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5272+ die("Cannot read ELF program headers: %s\n",
5273+ strerror(errno));
5274+ }
5275+ for(i = 0; i < ehdr.e_phnum; i++) {
5276+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5277+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5278+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5279+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5280+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5281+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5282+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5283+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5284+ }
5285+
5286+}
5287+
5288 static void read_shdrs(FILE *fp)
5289 {
5290- int i;
5291+ unsigned int i;
5292 Elf32_Shdr shdr;
5293
5294 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5295@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5296
5297 static void read_strtabs(FILE *fp)
5298 {
5299- int i;
5300+ unsigned int i;
5301 for (i = 0; i < ehdr.e_shnum; i++) {
5302 struct section *sec = &secs[i];
5303 if (sec->shdr.sh_type != SHT_STRTAB) {
5304@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5305
5306 static void read_symtabs(FILE *fp)
5307 {
5308- int i,j;
5309+ unsigned int i,j;
5310 for (i = 0; i < ehdr.e_shnum; i++) {
5311 struct section *sec = &secs[i];
5312 if (sec->shdr.sh_type != SHT_SYMTAB) {
5313@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5314
5315 static void read_relocs(FILE *fp)
5316 {
5317- int i,j;
5318+ unsigned int i,j;
5319+ uint32_t base;
5320+
5321 for (i = 0; i < ehdr.e_shnum; i++) {
5322 struct section *sec = &secs[i];
5323 if (sec->shdr.sh_type != SHT_REL) {
5324@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5325 die("Cannot read symbol table: %s\n",
5326 strerror(errno));
5327 }
5328+ base = 0;
5329+ for (j = 0; j < ehdr.e_phnum; j++) {
5330+ if (phdr[j].p_type != PT_LOAD )
5331+ continue;
5332+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5333+ continue;
5334+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5335+ break;
5336+ }
5337 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5338 Elf32_Rel *rel = &sec->reltab[j];
5339- rel->r_offset = elf32_to_cpu(rel->r_offset);
5340+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5341 rel->r_info = elf32_to_cpu(rel->r_info);
5342 }
5343 }
5344@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5345
5346 static void print_absolute_symbols(void)
5347 {
5348- int i;
5349+ unsigned int i;
5350 printf("Absolute symbols\n");
5351 printf(" Num: Value Size Type Bind Visibility Name\n");
5352 for (i = 0; i < ehdr.e_shnum; i++) {
5353 struct section *sec = &secs[i];
5354 char *sym_strtab;
5355 Elf32_Sym *sh_symtab;
5356- int j;
5357+ unsigned int j;
5358
5359 if (sec->shdr.sh_type != SHT_SYMTAB) {
5360 continue;
5361@@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5362
5363 static void print_absolute_relocs(void)
5364 {
5365- int i, printed = 0;
5366+ unsigned int i, printed = 0;
5367
5368 for (i = 0; i < ehdr.e_shnum; i++) {
5369 struct section *sec = &secs[i];
5370 struct section *sec_applies, *sec_symtab;
5371 char *sym_strtab;
5372 Elf32_Sym *sh_symtab;
5373- int j;
5374+ unsigned int j;
5375 if (sec->shdr.sh_type != SHT_REL) {
5376 continue;
5377 }
5378@@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
5379
5380 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
5381 {
5382- int i;
5383+ unsigned int i;
5384 /* Walk through the relocations */
5385 for (i = 0; i < ehdr.e_shnum; i++) {
5386 char *sym_strtab;
5387 Elf32_Sym *sh_symtab;
5388 struct section *sec_applies, *sec_symtab;
5389- int j;
5390+ unsigned int j;
5391 struct section *sec = &secs[i];
5392
5393 if (sec->shdr.sh_type != SHT_REL) {
5394@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
5395 !is_rel_reloc(sym_name(sym_strtab, sym))) {
5396 continue;
5397 }
5398+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
5399+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
5400+ continue;
5401+
5402+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
5403+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
5404+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
5405+ continue;
5406+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
5407+ continue;
5408+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
5409+ continue;
5410+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
5411+ continue;
5412+#endif
5413+
5414 switch (r_type) {
5415 case R_386_NONE:
5416 case R_386_PC32:
5417@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
5418
5419 static void emit_relocs(int as_text)
5420 {
5421- int i;
5422+ unsigned int i;
5423 /* Count how many relocations I have and allocate space for them. */
5424 reloc_count = 0;
5425 walk_relocs(count_reloc);
5426@@ -665,6 +725,7 @@ int main(int argc, char **argv)
5427 fname, strerror(errno));
5428 }
5429 read_ehdr(fp);
5430+ read_phdrs(fp);
5431 read_shdrs(fp);
5432 read_strtabs(fp);
5433 read_symtabs(fp);
5434diff -urNp linux-3.0.4/arch/x86/boot/cpucheck.c linux-3.0.4/arch/x86/boot/cpucheck.c
5435--- linux-3.0.4/arch/x86/boot/cpucheck.c 2011-07-21 22:17:23.000000000 -0400
5436+++ linux-3.0.4/arch/x86/boot/cpucheck.c 2011-08-23 21:47:55.000000000 -0400
5437@@ -74,7 +74,7 @@ static int has_fpu(void)
5438 u16 fcw = -1, fsw = -1;
5439 u32 cr0;
5440
5441- asm("movl %%cr0,%0" : "=r" (cr0));
5442+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
5443 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
5444 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
5445 asm volatile("movl %0,%%cr0" : : "r" (cr0));
5446@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
5447 {
5448 u32 f0, f1;
5449
5450- asm("pushfl ; "
5451+ asm volatile("pushfl ; "
5452 "pushfl ; "
5453 "popl %0 ; "
5454 "movl %0,%1 ; "
5455@@ -115,7 +115,7 @@ static void get_flags(void)
5456 set_bit(X86_FEATURE_FPU, cpu.flags);
5457
5458 if (has_eflag(X86_EFLAGS_ID)) {
5459- asm("cpuid"
5460+ asm volatile("cpuid"
5461 : "=a" (max_intel_level),
5462 "=b" (cpu_vendor[0]),
5463 "=d" (cpu_vendor[1]),
5464@@ -124,7 +124,7 @@ static void get_flags(void)
5465
5466 if (max_intel_level >= 0x00000001 &&
5467 max_intel_level <= 0x0000ffff) {
5468- asm("cpuid"
5469+ asm volatile("cpuid"
5470 : "=a" (tfms),
5471 "=c" (cpu.flags[4]),
5472 "=d" (cpu.flags[0])
5473@@ -136,7 +136,7 @@ static void get_flags(void)
5474 cpu.model += ((tfms >> 16) & 0xf) << 4;
5475 }
5476
5477- asm("cpuid"
5478+ asm volatile("cpuid"
5479 : "=a" (max_amd_level)
5480 : "a" (0x80000000)
5481 : "ebx", "ecx", "edx");
5482@@ -144,7 +144,7 @@ static void get_flags(void)
5483 if (max_amd_level >= 0x80000001 &&
5484 max_amd_level <= 0x8000ffff) {
5485 u32 eax = 0x80000001;
5486- asm("cpuid"
5487+ asm volatile("cpuid"
5488 : "+a" (eax),
5489 "=c" (cpu.flags[6]),
5490 "=d" (cpu.flags[1])
5491@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5492 u32 ecx = MSR_K7_HWCR;
5493 u32 eax, edx;
5494
5495- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5496+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5497 eax &= ~(1 << 15);
5498- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5499+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5500
5501 get_flags(); /* Make sure it really did something */
5502 err = check_flags();
5503@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5504 u32 ecx = MSR_VIA_FCR;
5505 u32 eax, edx;
5506
5507- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5508+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5509 eax |= (1<<1)|(1<<7);
5510- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5511+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5512
5513 set_bit(X86_FEATURE_CX8, cpu.flags);
5514 err = check_flags();
5515@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
5516 u32 eax, edx;
5517 u32 level = 1;
5518
5519- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5520- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5521- asm("cpuid"
5522+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5523+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5524+ asm volatile("cpuid"
5525 : "+a" (level), "=d" (cpu.flags[0])
5526 : : "ecx", "ebx");
5527- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5528+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5529
5530 err = check_flags();
5531 }
5532diff -urNp linux-3.0.4/arch/x86/boot/header.S linux-3.0.4/arch/x86/boot/header.S
5533--- linux-3.0.4/arch/x86/boot/header.S 2011-07-21 22:17:23.000000000 -0400
5534+++ linux-3.0.4/arch/x86/boot/header.S 2011-08-23 21:47:55.000000000 -0400
5535@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
5536 # single linked list of
5537 # struct setup_data
5538
5539-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
5540+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
5541
5542 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
5543 #define VO_INIT_SIZE (VO__end - VO__text)
5544diff -urNp linux-3.0.4/arch/x86/boot/Makefile linux-3.0.4/arch/x86/boot/Makefile
5545--- linux-3.0.4/arch/x86/boot/Makefile 2011-07-21 22:17:23.000000000 -0400
5546+++ linux-3.0.4/arch/x86/boot/Makefile 2011-08-23 21:47:55.000000000 -0400
5547@@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
5548 $(call cc-option, -fno-stack-protector) \
5549 $(call cc-option, -mpreferred-stack-boundary=2)
5550 KBUILD_CFLAGS += $(call cc-option, -m32)
5551+ifdef CONSTIFY_PLUGIN
5552+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5553+endif
5554 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5555 GCOV_PROFILE := n
5556
5557diff -urNp linux-3.0.4/arch/x86/boot/memory.c linux-3.0.4/arch/x86/boot/memory.c
5558--- linux-3.0.4/arch/x86/boot/memory.c 2011-07-21 22:17:23.000000000 -0400
5559+++ linux-3.0.4/arch/x86/boot/memory.c 2011-08-23 21:47:55.000000000 -0400
5560@@ -19,7 +19,7 @@
5561
5562 static int detect_memory_e820(void)
5563 {
5564- int count = 0;
5565+ unsigned int count = 0;
5566 struct biosregs ireg, oreg;
5567 struct e820entry *desc = boot_params.e820_map;
5568 static struct e820entry buf; /* static so it is zeroed */
5569diff -urNp linux-3.0.4/arch/x86/boot/video.c linux-3.0.4/arch/x86/boot/video.c
5570--- linux-3.0.4/arch/x86/boot/video.c 2011-07-21 22:17:23.000000000 -0400
5571+++ linux-3.0.4/arch/x86/boot/video.c 2011-08-23 21:47:55.000000000 -0400
5572@@ -96,7 +96,7 @@ static void store_mode_params(void)
5573 static unsigned int get_entry(void)
5574 {
5575 char entry_buf[4];
5576- int i, len = 0;
5577+ unsigned int i, len = 0;
5578 int key;
5579 unsigned int v;
5580
5581diff -urNp linux-3.0.4/arch/x86/boot/video-vesa.c linux-3.0.4/arch/x86/boot/video-vesa.c
5582--- linux-3.0.4/arch/x86/boot/video-vesa.c 2011-07-21 22:17:23.000000000 -0400
5583+++ linux-3.0.4/arch/x86/boot/video-vesa.c 2011-08-23 21:47:55.000000000 -0400
5584@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
5585
5586 boot_params.screen_info.vesapm_seg = oreg.es;
5587 boot_params.screen_info.vesapm_off = oreg.di;
5588+ boot_params.screen_info.vesapm_size = oreg.cx;
5589 }
5590
5591 /*
5592diff -urNp linux-3.0.4/arch/x86/ia32/ia32_aout.c linux-3.0.4/arch/x86/ia32/ia32_aout.c
5593--- linux-3.0.4/arch/x86/ia32/ia32_aout.c 2011-07-21 22:17:23.000000000 -0400
5594+++ linux-3.0.4/arch/x86/ia32/ia32_aout.c 2011-08-23 21:48:14.000000000 -0400
5595@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, st
5596 unsigned long dump_start, dump_size;
5597 struct user32 dump;
5598
5599+ memset(&dump, 0, sizeof(dump));
5600+
5601 fs = get_fs();
5602 set_fs(KERNEL_DS);
5603 has_dumped = 1;
5604diff -urNp linux-3.0.4/arch/x86/ia32/ia32entry.S linux-3.0.4/arch/x86/ia32/ia32entry.S
5605--- linux-3.0.4/arch/x86/ia32/ia32entry.S 2011-07-21 22:17:23.000000000 -0400
5606+++ linux-3.0.4/arch/x86/ia32/ia32entry.S 2011-08-25 17:36:37.000000000 -0400
5607@@ -13,6 +13,7 @@
5608 #include <asm/thread_info.h>
5609 #include <asm/segment.h>
5610 #include <asm/irqflags.h>
5611+#include <asm/pgtable.h>
5612 #include <linux/linkage.h>
5613
5614 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
5615@@ -95,6 +96,29 @@ ENTRY(native_irq_enable_sysexit)
5616 ENDPROC(native_irq_enable_sysexit)
5617 #endif
5618
5619+ .macro pax_enter_kernel_user
5620+#ifdef CONFIG_PAX_MEMORY_UDEREF
5621+ call pax_enter_kernel_user
5622+#endif
5623+ .endm
5624+
5625+ .macro pax_exit_kernel_user
5626+#ifdef CONFIG_PAX_MEMORY_UDEREF
5627+ call pax_exit_kernel_user
5628+#endif
5629+#ifdef CONFIG_PAX_RANDKSTACK
5630+ pushq %rax
5631+ call pax_randomize_kstack
5632+ popq %rax
5633+#endif
5634+ .endm
5635+
5636+ .macro pax_erase_kstack
5637+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5638+ call pax_erase_kstack
5639+#endif
5640+ .endm
5641+
5642 /*
5643 * 32bit SYSENTER instruction entry.
5644 *
5645@@ -121,7 +145,7 @@ ENTRY(ia32_sysenter_target)
5646 CFI_REGISTER rsp,rbp
5647 SWAPGS_UNSAFE_STACK
5648 movq PER_CPU_VAR(kernel_stack), %rsp
5649- addq $(KERNEL_STACK_OFFSET),%rsp
5650+ pax_enter_kernel_user
5651 /*
5652 * No need to follow this irqs on/off section: the syscall
5653 * disabled irqs, here we enable it straight after entry:
5654@@ -134,7 +158,8 @@ ENTRY(ia32_sysenter_target)
5655 CFI_REL_OFFSET rsp,0
5656 pushfq_cfi
5657 /*CFI_REL_OFFSET rflags,0*/
5658- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
5659+ GET_THREAD_INFO(%r10)
5660+ movl TI_sysenter_return(%r10), %r10d
5661 CFI_REGISTER rip,r10
5662 pushq_cfi $__USER32_CS
5663 /*CFI_REL_OFFSET cs,0*/
5664@@ -146,6 +171,12 @@ ENTRY(ia32_sysenter_target)
5665 SAVE_ARGS 0,0,1
5666 /* no need to do an access_ok check here because rbp has been
5667 32bit zero extended */
5668+
5669+#ifdef CONFIG_PAX_MEMORY_UDEREF
5670+ mov $PAX_USER_SHADOW_BASE,%r10
5671+ add %r10,%rbp
5672+#endif
5673+
5674 1: movl (%rbp),%ebp
5675 .section __ex_table,"a"
5676 .quad 1b,ia32_badarg
5677@@ -168,6 +199,8 @@ sysenter_dispatch:
5678 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5679 jnz sysexit_audit
5680 sysexit_from_sys_call:
5681+ pax_exit_kernel_user
5682+ pax_erase_kstack
5683 andl $~TS_COMPAT,TI_status(%r10)
5684 /* clear IF, that popfq doesn't enable interrupts early */
5685 andl $~0x200,EFLAGS-R11(%rsp)
5686@@ -194,6 +227,9 @@ sysexit_from_sys_call:
5687 movl %eax,%esi /* 2nd arg: syscall number */
5688 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
5689 call audit_syscall_entry
5690+
5691+ pax_erase_kstack
5692+
5693 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
5694 cmpq $(IA32_NR_syscalls-1),%rax
5695 ja ia32_badsys
5696@@ -246,6 +282,9 @@ sysenter_tracesys:
5697 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
5698 movq %rsp,%rdi /* &pt_regs -> arg1 */
5699 call syscall_trace_enter
5700+
5701+ pax_erase_kstack
5702+
5703 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5704 RESTORE_REST
5705 cmpq $(IA32_NR_syscalls-1),%rax
5706@@ -277,19 +316,24 @@ ENDPROC(ia32_sysenter_target)
5707 ENTRY(ia32_cstar_target)
5708 CFI_STARTPROC32 simple
5709 CFI_SIGNAL_FRAME
5710- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
5711+ CFI_DEF_CFA rsp,0
5712 CFI_REGISTER rip,rcx
5713 /*CFI_REGISTER rflags,r11*/
5714 SWAPGS_UNSAFE_STACK
5715 movl %esp,%r8d
5716 CFI_REGISTER rsp,r8
5717 movq PER_CPU_VAR(kernel_stack),%rsp
5718+
5719+#ifdef CONFIG_PAX_MEMORY_UDEREF
5720+ pax_enter_kernel_user
5721+#endif
5722+
5723 /*
5724 * No need to follow this irqs on/off section: the syscall
5725 * disabled irqs and here we enable it straight after entry:
5726 */
5727 ENABLE_INTERRUPTS(CLBR_NONE)
5728- SAVE_ARGS 8,1,1
5729+ SAVE_ARGS 8*6,1,1
5730 movl %eax,%eax /* zero extension */
5731 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
5732 movq %rcx,RIP-ARGOFFSET(%rsp)
5733@@ -305,6 +349,12 @@ ENTRY(ia32_cstar_target)
5734 /* no need to do an access_ok check here because r8 has been
5735 32bit zero extended */
5736 /* hardware stack frame is complete now */
5737+
5738+#ifdef CONFIG_PAX_MEMORY_UDEREF
5739+ mov $PAX_USER_SHADOW_BASE,%r10
5740+ add %r10,%r8
5741+#endif
5742+
5743 1: movl (%r8),%r9d
5744 .section __ex_table,"a"
5745 .quad 1b,ia32_badarg
5746@@ -327,6 +377,8 @@ cstar_dispatch:
5747 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5748 jnz sysretl_audit
5749 sysretl_from_sys_call:
5750+ pax_exit_kernel_user
5751+ pax_erase_kstack
5752 andl $~TS_COMPAT,TI_status(%r10)
5753 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
5754 movl RIP-ARGOFFSET(%rsp),%ecx
5755@@ -364,6 +416,9 @@ cstar_tracesys:
5756 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5757 movq %rsp,%rdi /* &pt_regs -> arg1 */
5758 call syscall_trace_enter
5759+
5760+ pax_erase_kstack
5761+
5762 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
5763 RESTORE_REST
5764 xchgl %ebp,%r9d
5765@@ -409,6 +464,7 @@ ENTRY(ia32_syscall)
5766 CFI_REL_OFFSET rip,RIP-RIP
5767 PARAVIRT_ADJUST_EXCEPTION_FRAME
5768 SWAPGS
5769+ pax_enter_kernel_user
5770 /*
5771 * No need to follow this irqs on/off section: the syscall
5772 * disabled irqs and here we enable it straight after entry:
5773@@ -441,6 +497,9 @@ ia32_tracesys:
5774 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5775 movq %rsp,%rdi /* &pt_regs -> arg1 */
5776 call syscall_trace_enter
5777+
5778+ pax_erase_kstack
5779+
5780 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5781 RESTORE_REST
5782 cmpq $(IA32_NR_syscalls-1),%rax
5783diff -urNp linux-3.0.4/arch/x86/ia32/ia32_signal.c linux-3.0.4/arch/x86/ia32/ia32_signal.c
5784--- linux-3.0.4/arch/x86/ia32/ia32_signal.c 2011-07-21 22:17:23.000000000 -0400
5785+++ linux-3.0.4/arch/x86/ia32/ia32_signal.c 2011-08-23 21:47:55.000000000 -0400
5786@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
5787 sp -= frame_size;
5788 /* Align the stack pointer according to the i386 ABI,
5789 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
5790- sp = ((sp + 4) & -16ul) - 4;
5791+ sp = ((sp - 12) & -16ul) - 4;
5792 return (void __user *) sp;
5793 }
5794
5795@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
5796 * These are actually not used anymore, but left because some
5797 * gdb versions depend on them as a marker.
5798 */
5799- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5800+ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5801 } put_user_catch(err);
5802
5803 if (err)
5804@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
5805 0xb8,
5806 __NR_ia32_rt_sigreturn,
5807 0x80cd,
5808- 0,
5809+ 0
5810 };
5811
5812 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
5813@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
5814
5815 if (ka->sa.sa_flags & SA_RESTORER)
5816 restorer = ka->sa.sa_restorer;
5817+ else if (current->mm->context.vdso)
5818+ /* Return stub is in 32bit vsyscall page */
5819+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
5820 else
5821- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
5822- rt_sigreturn);
5823+ restorer = &frame->retcode;
5824 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
5825
5826 /*
5827 * Not actually used anymore, but left because some gdb
5828 * versions need it.
5829 */
5830- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5831+ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5832 } put_user_catch(err);
5833
5834 if (err)
5835diff -urNp linux-3.0.4/arch/x86/include/asm/alternative.h linux-3.0.4/arch/x86/include/asm/alternative.h
5836--- linux-3.0.4/arch/x86/include/asm/alternative.h 2011-07-21 22:17:23.000000000 -0400
5837+++ linux-3.0.4/arch/x86/include/asm/alternative.h 2011-08-23 21:47:55.000000000 -0400
5838@@ -93,7 +93,7 @@ static inline int alternatives_text_rese
5839 ".section .discard,\"aw\",@progbits\n" \
5840 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
5841 ".previous\n" \
5842- ".section .altinstr_replacement, \"ax\"\n" \
5843+ ".section .altinstr_replacement, \"a\"\n" \
5844 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
5845 ".previous"
5846
5847diff -urNp linux-3.0.4/arch/x86/include/asm/apic.h linux-3.0.4/arch/x86/include/asm/apic.h
5848--- linux-3.0.4/arch/x86/include/asm/apic.h 2011-07-21 22:17:23.000000000 -0400
5849+++ linux-3.0.4/arch/x86/include/asm/apic.h 2011-08-23 21:48:14.000000000 -0400
5850@@ -45,7 +45,7 @@ static inline void generic_apic_probe(vo
5851
5852 #ifdef CONFIG_X86_LOCAL_APIC
5853
5854-extern unsigned int apic_verbosity;
5855+extern int apic_verbosity;
5856 extern int local_apic_timer_c2_ok;
5857
5858 extern int disable_apic;
5859diff -urNp linux-3.0.4/arch/x86/include/asm/apm.h linux-3.0.4/arch/x86/include/asm/apm.h
5860--- linux-3.0.4/arch/x86/include/asm/apm.h 2011-07-21 22:17:23.000000000 -0400
5861+++ linux-3.0.4/arch/x86/include/asm/apm.h 2011-08-23 21:47:55.000000000 -0400
5862@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
5863 __asm__ __volatile__(APM_DO_ZERO_SEGS
5864 "pushl %%edi\n\t"
5865 "pushl %%ebp\n\t"
5866- "lcall *%%cs:apm_bios_entry\n\t"
5867+ "lcall *%%ss:apm_bios_entry\n\t"
5868 "setc %%al\n\t"
5869 "popl %%ebp\n\t"
5870 "popl %%edi\n\t"
5871@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
5872 __asm__ __volatile__(APM_DO_ZERO_SEGS
5873 "pushl %%edi\n\t"
5874 "pushl %%ebp\n\t"
5875- "lcall *%%cs:apm_bios_entry\n\t"
5876+ "lcall *%%ss:apm_bios_entry\n\t"
5877 "setc %%bl\n\t"
5878 "popl %%ebp\n\t"
5879 "popl %%edi\n\t"
5880diff -urNp linux-3.0.4/arch/x86/include/asm/atomic64_32.h linux-3.0.4/arch/x86/include/asm/atomic64_32.h
5881--- linux-3.0.4/arch/x86/include/asm/atomic64_32.h 2011-07-21 22:17:23.000000000 -0400
5882+++ linux-3.0.4/arch/x86/include/asm/atomic64_32.h 2011-08-23 21:47:55.000000000 -0400
5883@@ -12,6 +12,14 @@ typedef struct {
5884 u64 __aligned(8) counter;
5885 } atomic64_t;
5886
5887+#ifdef CONFIG_PAX_REFCOUNT
5888+typedef struct {
5889+ u64 __aligned(8) counter;
5890+} atomic64_unchecked_t;
5891+#else
5892+typedef atomic64_t atomic64_unchecked_t;
5893+#endif
5894+
5895 #define ATOMIC64_INIT(val) { (val) }
5896
5897 #ifdef CONFIG_X86_CMPXCHG64
5898@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg
5899 }
5900
5901 /**
5902+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
5903+ * @p: pointer to type atomic64_unchecked_t
5904+ * @o: expected value
5905+ * @n: new value
5906+ *
5907+ * Atomically sets @v to @n if it was equal to @o and returns
5908+ * the old value.
5909+ */
5910+
5911+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
5912+{
5913+ return cmpxchg64(&v->counter, o, n);
5914+}
5915+
5916+/**
5917 * atomic64_xchg - xchg atomic64 variable
5918 * @v: pointer to type atomic64_t
5919 * @n: value to assign
5920@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64
5921 }
5922
5923 /**
5924+ * atomic64_set_unchecked - set atomic64 variable
5925+ * @v: pointer to type atomic64_unchecked_t
5926+ * @n: value to assign
5927+ *
5928+ * Atomically sets the value of @v to @n.
5929+ */
5930+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
5931+{
5932+ unsigned high = (unsigned)(i >> 32);
5933+ unsigned low = (unsigned)i;
5934+ asm volatile(ATOMIC64_ALTERNATIVE(set)
5935+ : "+b" (low), "+c" (high)
5936+ : "S" (v)
5937+ : "eax", "edx", "memory"
5938+ );
5939+}
5940+
5941+/**
5942 * atomic64_read - read atomic64 variable
5943 * @v: pointer to type atomic64_t
5944 *
5945@@ -93,6 +134,22 @@ static inline long long atomic64_read(at
5946 }
5947
5948 /**
5949+ * atomic64_read_unchecked - read atomic64 variable
5950+ * @v: pointer to type atomic64_unchecked_t
5951+ *
5952+ * Atomically reads the value of @v and returns it.
5953+ */
5954+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
5955+{
5956+ long long r;
5957+ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
5958+ : "=A" (r), "+c" (v)
5959+ : : "memory"
5960+ );
5961+ return r;
5962+ }
5963+
5964+/**
5965 * atomic64_add_return - add and return
5966 * @i: integer value to add
5967 * @v: pointer to type atomic64_t
5968@@ -108,6 +165,22 @@ static inline long long atomic64_add_ret
5969 return i;
5970 }
5971
5972+/**
5973+ * atomic64_add_return_unchecked - add and return
5974+ * @i: integer value to add
5975+ * @v: pointer to type atomic64_unchecked_t
5976+ *
5977+ * Atomically adds @i to @v and returns @i + *@v
5978+ */
5979+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
5980+{
5981+ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
5982+ : "+A" (i), "+c" (v)
5983+ : : "memory"
5984+ );
5985+ return i;
5986+}
5987+
5988 /*
5989 * Other variants with different arithmetic operators:
5990 */
5991@@ -131,6 +204,17 @@ static inline long long atomic64_inc_ret
5992 return a;
5993 }
5994
5995+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
5996+{
5997+ long long a;
5998+ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
5999+ : "=A" (a)
6000+ : "S" (v)
6001+ : "memory", "ecx"
6002+ );
6003+ return a;
6004+}
6005+
6006 static inline long long atomic64_dec_return(atomic64_t *v)
6007 {
6008 long long a;
6009@@ -159,6 +243,22 @@ static inline long long atomic64_add(lon
6010 }
6011
6012 /**
6013+ * atomic64_add_unchecked - add integer to atomic64 variable
6014+ * @i: integer value to add
6015+ * @v: pointer to type atomic64_unchecked_t
6016+ *
6017+ * Atomically adds @i to @v.
6018+ */
6019+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
6020+{
6021+ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
6022+ : "+A" (i), "+c" (v)
6023+ : : "memory"
6024+ );
6025+ return i;
6026+}
6027+
6028+/**
6029 * atomic64_sub - subtract the atomic64 variable
6030 * @i: integer value to subtract
6031 * @v: pointer to type atomic64_t
6032diff -urNp linux-3.0.4/arch/x86/include/asm/atomic64_64.h linux-3.0.4/arch/x86/include/asm/atomic64_64.h
6033--- linux-3.0.4/arch/x86/include/asm/atomic64_64.h 2011-07-21 22:17:23.000000000 -0400
6034+++ linux-3.0.4/arch/x86/include/asm/atomic64_64.h 2011-08-23 21:47:55.000000000 -0400
6035@@ -18,7 +18,19 @@
6036 */
6037 static inline long atomic64_read(const atomic64_t *v)
6038 {
6039- return (*(volatile long *)&(v)->counter);
6040+ return (*(volatile const long *)&(v)->counter);
6041+}
6042+
6043+/**
6044+ * atomic64_read_unchecked - read atomic64 variable
6045+ * @v: pointer of type atomic64_unchecked_t
6046+ *
6047+ * Atomically reads the value of @v.
6048+ * Doesn't imply a read memory barrier.
6049+ */
6050+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6051+{
6052+ return (*(volatile const long *)&(v)->counter);
6053 }
6054
6055 /**
6056@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
6057 }
6058
6059 /**
6060+ * atomic64_set_unchecked - set atomic64 variable
6061+ * @v: pointer to type atomic64_unchecked_t
6062+ * @i: required value
6063+ *
6064+ * Atomically sets the value of @v to @i.
6065+ */
6066+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6067+{
6068+ v->counter = i;
6069+}
6070+
6071+/**
6072 * atomic64_add - add integer to atomic64 variable
6073 * @i: integer value to add
6074 * @v: pointer to type atomic64_t
6075@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
6076 */
6077 static inline void atomic64_add(long i, atomic64_t *v)
6078 {
6079+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
6080+
6081+#ifdef CONFIG_PAX_REFCOUNT
6082+ "jno 0f\n"
6083+ LOCK_PREFIX "subq %1,%0\n"
6084+ "int $4\n0:\n"
6085+ _ASM_EXTABLE(0b, 0b)
6086+#endif
6087+
6088+ : "=m" (v->counter)
6089+ : "er" (i), "m" (v->counter));
6090+}
6091+
6092+/**
6093+ * atomic64_add_unchecked - add integer to atomic64 variable
6094+ * @i: integer value to add
6095+ * @v: pointer to type atomic64_unchecked_t
6096+ *
6097+ * Atomically adds @i to @v.
6098+ */
6099+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6100+{
6101 asm volatile(LOCK_PREFIX "addq %1,%0"
6102 : "=m" (v->counter)
6103 : "er" (i), "m" (v->counter));
6104@@ -56,7 +102,29 @@ static inline void atomic64_add(long i,
6105 */
6106 static inline void atomic64_sub(long i, atomic64_t *v)
6107 {
6108- asm volatile(LOCK_PREFIX "subq %1,%0"
6109+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
6110+
6111+#ifdef CONFIG_PAX_REFCOUNT
6112+ "jno 0f\n"
6113+ LOCK_PREFIX "addq %1,%0\n"
6114+ "int $4\n0:\n"
6115+ _ASM_EXTABLE(0b, 0b)
6116+#endif
6117+
6118+ : "=m" (v->counter)
6119+ : "er" (i), "m" (v->counter));
6120+}
6121+
6122+/**
6123+ * atomic64_sub_unchecked - subtract the atomic64 variable
6124+ * @i: integer value to subtract
6125+ * @v: pointer to type atomic64_unchecked_t
6126+ *
6127+ * Atomically subtracts @i from @v.
6128+ */
6129+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6130+{
6131+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
6132 : "=m" (v->counter)
6133 : "er" (i), "m" (v->counter));
6134 }
6135@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(
6136 {
6137 unsigned char c;
6138
6139- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
6140+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
6141+
6142+#ifdef CONFIG_PAX_REFCOUNT
6143+ "jno 0f\n"
6144+ LOCK_PREFIX "addq %2,%0\n"
6145+ "int $4\n0:\n"
6146+ _ASM_EXTABLE(0b, 0b)
6147+#endif
6148+
6149+ "sete %1\n"
6150 : "=m" (v->counter), "=qm" (c)
6151 : "er" (i), "m" (v->counter) : "memory");
6152 return c;
6153@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(
6154 */
6155 static inline void atomic64_inc(atomic64_t *v)
6156 {
6157+ asm volatile(LOCK_PREFIX "incq %0\n"
6158+
6159+#ifdef CONFIG_PAX_REFCOUNT
6160+ "jno 0f\n"
6161+ LOCK_PREFIX "decq %0\n"
6162+ "int $4\n0:\n"
6163+ _ASM_EXTABLE(0b, 0b)
6164+#endif
6165+
6166+ : "=m" (v->counter)
6167+ : "m" (v->counter));
6168+}
6169+
6170+/**
6171+ * atomic64_inc_unchecked - increment atomic64 variable
6172+ * @v: pointer to type atomic64_unchecked_t
6173+ *
6174+ * Atomically increments @v by 1.
6175+ */
6176+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6177+{
6178 asm volatile(LOCK_PREFIX "incq %0"
6179 : "=m" (v->counter)
6180 : "m" (v->counter));
6181@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64
6182 */
6183 static inline void atomic64_dec(atomic64_t *v)
6184 {
6185- asm volatile(LOCK_PREFIX "decq %0"
6186+ asm volatile(LOCK_PREFIX "decq %0\n"
6187+
6188+#ifdef CONFIG_PAX_REFCOUNT
6189+ "jno 0f\n"
6190+ LOCK_PREFIX "incq %0\n"
6191+ "int $4\n0:\n"
6192+ _ASM_EXTABLE(0b, 0b)
6193+#endif
6194+
6195+ : "=m" (v->counter)
6196+ : "m" (v->counter));
6197+}
6198+
6199+/**
6200+ * atomic64_dec_unchecked - decrement atomic64 variable
6201+ * @v: pointer to type atomic64_t
6202+ *
6203+ * Atomically decrements @v by 1.
6204+ */
6205+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6206+{
6207+ asm volatile(LOCK_PREFIX "decq %0\n"
6208 : "=m" (v->counter)
6209 : "m" (v->counter));
6210 }
6211@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(
6212 {
6213 unsigned char c;
6214
6215- asm volatile(LOCK_PREFIX "decq %0; sete %1"
6216+ asm volatile(LOCK_PREFIX "decq %0\n"
6217+
6218+#ifdef CONFIG_PAX_REFCOUNT
6219+ "jno 0f\n"
6220+ LOCK_PREFIX "incq %0\n"
6221+ "int $4\n0:\n"
6222+ _ASM_EXTABLE(0b, 0b)
6223+#endif
6224+
6225+ "sete %1\n"
6226 : "=m" (v->counter), "=qm" (c)
6227 : "m" (v->counter) : "memory");
6228 return c != 0;
6229@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(
6230 {
6231 unsigned char c;
6232
6233- asm volatile(LOCK_PREFIX "incq %0; sete %1"
6234+ asm volatile(LOCK_PREFIX "incq %0\n"
6235+
6236+#ifdef CONFIG_PAX_REFCOUNT
6237+ "jno 0f\n"
6238+ LOCK_PREFIX "decq %0\n"
6239+ "int $4\n0:\n"
6240+ _ASM_EXTABLE(0b, 0b)
6241+#endif
6242+
6243+ "sete %1\n"
6244 : "=m" (v->counter), "=qm" (c)
6245 : "m" (v->counter) : "memory");
6246 return c != 0;
6247@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(
6248 {
6249 unsigned char c;
6250
6251- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
6252+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
6253+
6254+#ifdef CONFIG_PAX_REFCOUNT
6255+ "jno 0f\n"
6256+ LOCK_PREFIX "subq %2,%0\n"
6257+ "int $4\n0:\n"
6258+ _ASM_EXTABLE(0b, 0b)
6259+#endif
6260+
6261+ "sets %1\n"
6262 : "=m" (v->counter), "=qm" (c)
6263 : "er" (i), "m" (v->counter) : "memory");
6264 return c;
6265@@ -171,7 +317,31 @@ static inline int atomic64_add_negative(
6266 static inline long atomic64_add_return(long i, atomic64_t *v)
6267 {
6268 long __i = i;
6269- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
6270+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
6271+
6272+#ifdef CONFIG_PAX_REFCOUNT
6273+ "jno 0f\n"
6274+ "movq %0, %1\n"
6275+ "int $4\n0:\n"
6276+ _ASM_EXTABLE(0b, 0b)
6277+#endif
6278+
6279+ : "+r" (i), "+m" (v->counter)
6280+ : : "memory");
6281+ return i + __i;
6282+}
6283+
6284+/**
6285+ * atomic64_add_return_unchecked - add and return
6286+ * @i: integer value to add
6287+ * @v: pointer to type atomic64_unchecked_t
6288+ *
6289+ * Atomically adds @i to @v and returns @i + @v
6290+ */
6291+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6292+{
6293+ long __i = i;
6294+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
6295 : "+r" (i), "+m" (v->counter)
6296 : : "memory");
6297 return i + __i;
6298@@ -183,6 +353,10 @@ static inline long atomic64_sub_return(l
6299 }
6300
6301 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
6302+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6303+{
6304+ return atomic64_add_return_unchecked(1, v);
6305+}
6306 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
6307
6308 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6309@@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atom
6310 return cmpxchg(&v->counter, old, new);
6311 }
6312
6313+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
6314+{
6315+ return cmpxchg(&v->counter, old, new);
6316+}
6317+
6318 static inline long atomic64_xchg(atomic64_t *v, long new)
6319 {
6320 return xchg(&v->counter, new);
6321@@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic6
6322 */
6323 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
6324 {
6325- long c, old;
6326+ long c, old, new;
6327 c = atomic64_read(v);
6328 for (;;) {
6329- if (unlikely(c == (u)))
6330+ if (unlikely(c == u))
6331 break;
6332- old = atomic64_cmpxchg((v), c, c + (a));
6333+
6334+ asm volatile("add %2,%0\n"
6335+
6336+#ifdef CONFIG_PAX_REFCOUNT
6337+ "jno 0f\n"
6338+ "sub %2,%0\n"
6339+ "int $4\n0:\n"
6340+ _ASM_EXTABLE(0b, 0b)
6341+#endif
6342+
6343+ : "=r" (new)
6344+ : "0" (c), "ir" (a));
6345+
6346+ old = atomic64_cmpxchg(v, c, new);
6347 if (likely(old == c))
6348 break;
6349 c = old;
6350 }
6351- return c != (u);
6352+ return c != u;
6353 }
6354
6355 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6356diff -urNp linux-3.0.4/arch/x86/include/asm/atomic.h linux-3.0.4/arch/x86/include/asm/atomic.h
6357--- linux-3.0.4/arch/x86/include/asm/atomic.h 2011-07-21 22:17:23.000000000 -0400
6358+++ linux-3.0.4/arch/x86/include/asm/atomic.h 2011-08-23 21:47:55.000000000 -0400
6359@@ -22,7 +22,18 @@
6360 */
6361 static inline int atomic_read(const atomic_t *v)
6362 {
6363- return (*(volatile int *)&(v)->counter);
6364+ return (*(volatile const int *)&(v)->counter);
6365+}
6366+
6367+/**
6368+ * atomic_read_unchecked - read atomic variable
6369+ * @v: pointer of type atomic_unchecked_t
6370+ *
6371+ * Atomically reads the value of @v.
6372+ */
6373+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6374+{
6375+ return (*(volatile const int *)&(v)->counter);
6376 }
6377
6378 /**
6379@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
6380 }
6381
6382 /**
6383+ * atomic_set_unchecked - set atomic variable
6384+ * @v: pointer of type atomic_unchecked_t
6385+ * @i: required value
6386+ *
6387+ * Atomically sets the value of @v to @i.
6388+ */
6389+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6390+{
6391+ v->counter = i;
6392+}
6393+
6394+/**
6395 * atomic_add - add integer to atomic variable
6396 * @i: integer value to add
6397 * @v: pointer of type atomic_t
6398@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
6399 */
6400 static inline void atomic_add(int i, atomic_t *v)
6401 {
6402- asm volatile(LOCK_PREFIX "addl %1,%0"
6403+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6404+
6405+#ifdef CONFIG_PAX_REFCOUNT
6406+ "jno 0f\n"
6407+ LOCK_PREFIX "subl %1,%0\n"
6408+ "int $4\n0:\n"
6409+ _ASM_EXTABLE(0b, 0b)
6410+#endif
6411+
6412+ : "+m" (v->counter)
6413+ : "ir" (i));
6414+}
6415+
6416+/**
6417+ * atomic_add_unchecked - add integer to atomic variable
6418+ * @i: integer value to add
6419+ * @v: pointer of type atomic_unchecked_t
6420+ *
6421+ * Atomically adds @i to @v.
6422+ */
6423+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6424+{
6425+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6426 : "+m" (v->counter)
6427 : "ir" (i));
6428 }
6429@@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
6430 */
6431 static inline void atomic_sub(int i, atomic_t *v)
6432 {
6433- asm volatile(LOCK_PREFIX "subl %1,%0"
6434+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6435+
6436+#ifdef CONFIG_PAX_REFCOUNT
6437+ "jno 0f\n"
6438+ LOCK_PREFIX "addl %1,%0\n"
6439+ "int $4\n0:\n"
6440+ _ASM_EXTABLE(0b, 0b)
6441+#endif
6442+
6443+ : "+m" (v->counter)
6444+ : "ir" (i));
6445+}
6446+
6447+/**
6448+ * atomic_sub_unchecked - subtract integer from atomic variable
6449+ * @i: integer value to subtract
6450+ * @v: pointer of type atomic_unchecked_t
6451+ *
6452+ * Atomically subtracts @i from @v.
6453+ */
6454+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6455+{
6456+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6457 : "+m" (v->counter)
6458 : "ir" (i));
6459 }
6460@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
6461 {
6462 unsigned char c;
6463
6464- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6465+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
6466+
6467+#ifdef CONFIG_PAX_REFCOUNT
6468+ "jno 0f\n"
6469+ LOCK_PREFIX "addl %2,%0\n"
6470+ "int $4\n0:\n"
6471+ _ASM_EXTABLE(0b, 0b)
6472+#endif
6473+
6474+ "sete %1\n"
6475 : "+m" (v->counter), "=qm" (c)
6476 : "ir" (i) : "memory");
6477 return c;
6478@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
6479 */
6480 static inline void atomic_inc(atomic_t *v)
6481 {
6482- asm volatile(LOCK_PREFIX "incl %0"
6483+ asm volatile(LOCK_PREFIX "incl %0\n"
6484+
6485+#ifdef CONFIG_PAX_REFCOUNT
6486+ "jno 0f\n"
6487+ LOCK_PREFIX "decl %0\n"
6488+ "int $4\n0:\n"
6489+ _ASM_EXTABLE(0b, 0b)
6490+#endif
6491+
6492+ : "+m" (v->counter));
6493+}
6494+
6495+/**
6496+ * atomic_inc_unchecked - increment atomic variable
6497+ * @v: pointer of type atomic_unchecked_t
6498+ *
6499+ * Atomically increments @v by 1.
6500+ */
6501+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6502+{
6503+ asm volatile(LOCK_PREFIX "incl %0\n"
6504 : "+m" (v->counter));
6505 }
6506
6507@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
6508 */
6509 static inline void atomic_dec(atomic_t *v)
6510 {
6511- asm volatile(LOCK_PREFIX "decl %0"
6512+ asm volatile(LOCK_PREFIX "decl %0\n"
6513+
6514+#ifdef CONFIG_PAX_REFCOUNT
6515+ "jno 0f\n"
6516+ LOCK_PREFIX "incl %0\n"
6517+ "int $4\n0:\n"
6518+ _ASM_EXTABLE(0b, 0b)
6519+#endif
6520+
6521+ : "+m" (v->counter));
6522+}
6523+
6524+/**
6525+ * atomic_dec_unchecked - decrement atomic variable
6526+ * @v: pointer of type atomic_unchecked_t
6527+ *
6528+ * Atomically decrements @v by 1.
6529+ */
6530+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6531+{
6532+ asm volatile(LOCK_PREFIX "decl %0\n"
6533 : "+m" (v->counter));
6534 }
6535
6536@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
6537 {
6538 unsigned char c;
6539
6540- asm volatile(LOCK_PREFIX "decl %0; sete %1"
6541+ asm volatile(LOCK_PREFIX "decl %0\n"
6542+
6543+#ifdef CONFIG_PAX_REFCOUNT
6544+ "jno 0f\n"
6545+ LOCK_PREFIX "incl %0\n"
6546+ "int $4\n0:\n"
6547+ _ASM_EXTABLE(0b, 0b)
6548+#endif
6549+
6550+ "sete %1\n"
6551 : "+m" (v->counter), "=qm" (c)
6552 : : "memory");
6553 return c != 0;
6554@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(at
6555 {
6556 unsigned char c;
6557
6558- asm volatile(LOCK_PREFIX "incl %0; sete %1"
6559+ asm volatile(LOCK_PREFIX "incl %0\n"
6560+
6561+#ifdef CONFIG_PAX_REFCOUNT
6562+ "jno 0f\n"
6563+ LOCK_PREFIX "decl %0\n"
6564+ "int $4\n0:\n"
6565+ _ASM_EXTABLE(0b, 0b)
6566+#endif
6567+
6568+ "sete %1\n"
6569+ : "+m" (v->counter), "=qm" (c)
6570+ : : "memory");
6571+ return c != 0;
6572+}
6573+
6574+/**
6575+ * atomic_inc_and_test_unchecked - increment and test
6576+ * @v: pointer of type atomic_unchecked_t
6577+ *
6578+ * Atomically increments @v by 1
6579+ * and returns true if the result is zero, or false for all
6580+ * other cases.
6581+ */
6582+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6583+{
6584+ unsigned char c;
6585+
6586+ asm volatile(LOCK_PREFIX "incl %0\n"
6587+ "sete %1\n"
6588 : "+m" (v->counter), "=qm" (c)
6589 : : "memory");
6590 return c != 0;
6591@@ -157,7 +310,16 @@ static inline int atomic_add_negative(in
6592 {
6593 unsigned char c;
6594
6595- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6596+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
6597+
6598+#ifdef CONFIG_PAX_REFCOUNT
6599+ "jno 0f\n"
6600+ LOCK_PREFIX "subl %2,%0\n"
6601+ "int $4\n0:\n"
6602+ _ASM_EXTABLE(0b, 0b)
6603+#endif
6604+
6605+ "sets %1\n"
6606 : "+m" (v->counter), "=qm" (c)
6607 : "ir" (i) : "memory");
6608 return c;
6609@@ -180,6 +342,46 @@ static inline int atomic_add_return(int
6610 #endif
6611 /* Modern 486+ processor */
6612 __i = i;
6613+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6614+
6615+#ifdef CONFIG_PAX_REFCOUNT
6616+ "jno 0f\n"
6617+ "movl %0, %1\n"
6618+ "int $4\n0:\n"
6619+ _ASM_EXTABLE(0b, 0b)
6620+#endif
6621+
6622+ : "+r" (i), "+m" (v->counter)
6623+ : : "memory");
6624+ return i + __i;
6625+
6626+#ifdef CONFIG_M386
6627+no_xadd: /* Legacy 386 processor */
6628+ local_irq_save(flags);
6629+ __i = atomic_read(v);
6630+ atomic_set(v, i + __i);
6631+ local_irq_restore(flags);
6632+ return i + __i;
6633+#endif
6634+}
6635+
6636+/**
6637+ * atomic_add_return_unchecked - add integer and return
6638+ * @v: pointer of type atomic_unchecked_t
6639+ * @i: integer value to add
6640+ *
6641+ * Atomically adds @i to @v and returns @i + @v
6642+ */
6643+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6644+{
6645+ int __i;
6646+#ifdef CONFIG_M386
6647+ unsigned long flags;
6648+ if (unlikely(boot_cpu_data.x86 <= 3))
6649+ goto no_xadd;
6650+#endif
6651+ /* Modern 486+ processor */
6652+ __i = i;
6653 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6654 : "+r" (i), "+m" (v->counter)
6655 : : "memory");
6656@@ -208,6 +410,10 @@ static inline int atomic_sub_return(int
6657 }
6658
6659 #define atomic_inc_return(v) (atomic_add_return(1, v))
6660+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6661+{
6662+ return atomic_add_return_unchecked(1, v);
6663+}
6664 #define atomic_dec_return(v) (atomic_sub_return(1, v))
6665
6666 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6667@@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_
6668 return cmpxchg(&v->counter, old, new);
6669 }
6670
6671+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6672+{
6673+ return cmpxchg(&v->counter, old, new);
6674+}
6675+
6676 static inline int atomic_xchg(atomic_t *v, int new)
6677 {
6678 return xchg(&v->counter, new);
6679 }
6680
6681+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6682+{
6683+ return xchg(&v->counter, new);
6684+}
6685+
6686 /**
6687 * atomic_add_unless - add unless the number is already a given value
6688 * @v: pointer of type atomic_t
6689@@ -231,21 +447,77 @@ static inline int atomic_xchg(atomic_t *
6690 */
6691 static inline int atomic_add_unless(atomic_t *v, int a, int u)
6692 {
6693- int c, old;
6694+ int c, old, new;
6695 c = atomic_read(v);
6696 for (;;) {
6697- if (unlikely(c == (u)))
6698+ if (unlikely(c == u))
6699 break;
6700- old = atomic_cmpxchg((v), c, c + (a));
6701+
6702+ asm volatile("addl %2,%0\n"
6703+
6704+#ifdef CONFIG_PAX_REFCOUNT
6705+ "jno 0f\n"
6706+ "subl %2,%0\n"
6707+ "int $4\n0:\n"
6708+ _ASM_EXTABLE(0b, 0b)
6709+#endif
6710+
6711+ : "=r" (new)
6712+ : "0" (c), "ir" (a));
6713+
6714+ old = atomic_cmpxchg(v, c, new);
6715 if (likely(old == c))
6716 break;
6717 c = old;
6718 }
6719- return c != (u);
6720+ return c != u;
6721 }
6722
6723 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
6724
6725+/**
6726+ * atomic_inc_not_zero_hint - increment if not null
6727+ * @v: pointer of type atomic_t
6728+ * @hint: probable value of the atomic before the increment
6729+ *
6730+ * This version of atomic_inc_not_zero() gives a hint of probable
6731+ * value of the atomic. This helps processor to not read the memory
6732+ * before doing the atomic read/modify/write cycle, lowering
6733+ * number of bus transactions on some arches.
6734+ *
6735+ * Returns: 0 if increment was not done, 1 otherwise.
6736+ */
6737+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
6738+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
6739+{
6740+ int val, c = hint, new;
6741+
6742+ /* sanity test, should be removed by compiler if hint is a constant */
6743+ if (!hint)
6744+ return atomic_inc_not_zero(v);
6745+
6746+ do {
6747+ asm volatile("incl %0\n"
6748+
6749+#ifdef CONFIG_PAX_REFCOUNT
6750+ "jno 0f\n"
6751+ "decl %0\n"
6752+ "int $4\n0:\n"
6753+ _ASM_EXTABLE(0b, 0b)
6754+#endif
6755+
6756+ : "=r" (new)
6757+ : "0" (c));
6758+
6759+ val = atomic_cmpxchg(v, c, new);
6760+ if (val == c)
6761+ return 1;
6762+ c = val;
6763+ } while (c);
6764+
6765+ return 0;
6766+}
6767+
6768 /*
6769 * atomic_dec_if_positive - decrement by 1 if old value positive
6770 * @v: pointer of type atomic_t
6771diff -urNp linux-3.0.4/arch/x86/include/asm/bitops.h linux-3.0.4/arch/x86/include/asm/bitops.h
6772--- linux-3.0.4/arch/x86/include/asm/bitops.h 2011-07-21 22:17:23.000000000 -0400
6773+++ linux-3.0.4/arch/x86/include/asm/bitops.h 2011-08-23 21:47:55.000000000 -0400
6774@@ -38,7 +38,7 @@
6775 * a mask operation on a byte.
6776 */
6777 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
6778-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
6779+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
6780 #define CONST_MASK(nr) (1 << ((nr) & 7))
6781
6782 /**
6783diff -urNp linux-3.0.4/arch/x86/include/asm/boot.h linux-3.0.4/arch/x86/include/asm/boot.h
6784--- linux-3.0.4/arch/x86/include/asm/boot.h 2011-07-21 22:17:23.000000000 -0400
6785+++ linux-3.0.4/arch/x86/include/asm/boot.h 2011-08-23 21:47:55.000000000 -0400
6786@@ -11,10 +11,15 @@
6787 #include <asm/pgtable_types.h>
6788
6789 /* Physical address where kernel should be loaded. */
6790-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6791+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6792 + (CONFIG_PHYSICAL_ALIGN - 1)) \
6793 & ~(CONFIG_PHYSICAL_ALIGN - 1))
6794
6795+#ifndef __ASSEMBLY__
6796+extern unsigned char __LOAD_PHYSICAL_ADDR[];
6797+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
6798+#endif
6799+
6800 /* Minimum kernel alignment, as a power of two */
6801 #ifdef CONFIG_X86_64
6802 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
6803diff -urNp linux-3.0.4/arch/x86/include/asm/cacheflush.h linux-3.0.4/arch/x86/include/asm/cacheflush.h
6804--- linux-3.0.4/arch/x86/include/asm/cacheflush.h 2011-07-21 22:17:23.000000000 -0400
6805+++ linux-3.0.4/arch/x86/include/asm/cacheflush.h 2011-08-23 21:47:55.000000000 -0400
6806@@ -26,7 +26,7 @@ static inline unsigned long get_page_mem
6807 unsigned long pg_flags = pg->flags & _PGMT_MASK;
6808
6809 if (pg_flags == _PGMT_DEFAULT)
6810- return -1;
6811+ return ~0UL;
6812 else if (pg_flags == _PGMT_WC)
6813 return _PAGE_CACHE_WC;
6814 else if (pg_flags == _PGMT_UC_MINUS)
6815diff -urNp linux-3.0.4/arch/x86/include/asm/cache.h linux-3.0.4/arch/x86/include/asm/cache.h
6816--- linux-3.0.4/arch/x86/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
6817+++ linux-3.0.4/arch/x86/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
6818@@ -5,12 +5,13 @@
6819
6820 /* L1 cache line size */
6821 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
6822-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6823+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6824
6825 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
6826+#define __read_only __attribute__((__section__(".data..read_only")))
6827
6828 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
6829-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
6830+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
6831
6832 #ifdef CONFIG_X86_VSMP
6833 #ifdef CONFIG_SMP
6834diff -urNp linux-3.0.4/arch/x86/include/asm/checksum_32.h linux-3.0.4/arch/x86/include/asm/checksum_32.h
6835--- linux-3.0.4/arch/x86/include/asm/checksum_32.h 2011-07-21 22:17:23.000000000 -0400
6836+++ linux-3.0.4/arch/x86/include/asm/checksum_32.h 2011-08-23 21:47:55.000000000 -0400
6837@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
6838 int len, __wsum sum,
6839 int *src_err_ptr, int *dst_err_ptr);
6840
6841+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
6842+ int len, __wsum sum,
6843+ int *src_err_ptr, int *dst_err_ptr);
6844+
6845+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
6846+ int len, __wsum sum,
6847+ int *src_err_ptr, int *dst_err_ptr);
6848+
6849 /*
6850 * Note: when you get a NULL pointer exception here this means someone
6851 * passed in an incorrect kernel address to one of these functions.
6852@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
6853 int *err_ptr)
6854 {
6855 might_sleep();
6856- return csum_partial_copy_generic((__force void *)src, dst,
6857+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
6858 len, sum, err_ptr, NULL);
6859 }
6860
6861@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
6862 {
6863 might_sleep();
6864 if (access_ok(VERIFY_WRITE, dst, len))
6865- return csum_partial_copy_generic(src, (__force void *)dst,
6866+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
6867 len, sum, NULL, err_ptr);
6868
6869 if (len)
6870diff -urNp linux-3.0.4/arch/x86/include/asm/cpufeature.h linux-3.0.4/arch/x86/include/asm/cpufeature.h
6871--- linux-3.0.4/arch/x86/include/asm/cpufeature.h 2011-07-21 22:17:23.000000000 -0400
6872+++ linux-3.0.4/arch/x86/include/asm/cpufeature.h 2011-08-23 21:47:55.000000000 -0400
6873@@ -358,7 +358,7 @@ static __always_inline __pure bool __sta
6874 ".section .discard,\"aw\",@progbits\n"
6875 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
6876 ".previous\n"
6877- ".section .altinstr_replacement,\"ax\"\n"
6878+ ".section .altinstr_replacement,\"a\"\n"
6879 "3: movb $1,%0\n"
6880 "4:\n"
6881 ".previous\n"
6882diff -urNp linux-3.0.4/arch/x86/include/asm/desc_defs.h linux-3.0.4/arch/x86/include/asm/desc_defs.h
6883--- linux-3.0.4/arch/x86/include/asm/desc_defs.h 2011-07-21 22:17:23.000000000 -0400
6884+++ linux-3.0.4/arch/x86/include/asm/desc_defs.h 2011-08-23 21:47:55.000000000 -0400
6885@@ -31,6 +31,12 @@ struct desc_struct {
6886 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
6887 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
6888 };
6889+ struct {
6890+ u16 offset_low;
6891+ u16 seg;
6892+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
6893+ unsigned offset_high: 16;
6894+ } gate;
6895 };
6896 } __attribute__((packed));
6897
6898diff -urNp linux-3.0.4/arch/x86/include/asm/desc.h linux-3.0.4/arch/x86/include/asm/desc.h
6899--- linux-3.0.4/arch/x86/include/asm/desc.h 2011-07-21 22:17:23.000000000 -0400
6900+++ linux-3.0.4/arch/x86/include/asm/desc.h 2011-08-23 21:47:55.000000000 -0400
6901@@ -4,6 +4,7 @@
6902 #include <asm/desc_defs.h>
6903 #include <asm/ldt.h>
6904 #include <asm/mmu.h>
6905+#include <asm/pgtable.h>
6906
6907 #include <linux/smp.h>
6908
6909@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_
6910
6911 desc->type = (info->read_exec_only ^ 1) << 1;
6912 desc->type |= info->contents << 2;
6913+ desc->type |= info->seg_not_present ^ 1;
6914
6915 desc->s = 1;
6916 desc->dpl = 0x3;
6917@@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_
6918 }
6919
6920 extern struct desc_ptr idt_descr;
6921-extern gate_desc idt_table[];
6922-
6923-struct gdt_page {
6924- struct desc_struct gdt[GDT_ENTRIES];
6925-} __attribute__((aligned(PAGE_SIZE)));
6926-
6927-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
6928+extern gate_desc idt_table[256];
6929
6930+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
6931 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
6932 {
6933- return per_cpu(gdt_page, cpu).gdt;
6934+ return cpu_gdt_table[cpu];
6935 }
6936
6937 #ifdef CONFIG_X86_64
6938@@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *
6939 unsigned long base, unsigned dpl, unsigned flags,
6940 unsigned short seg)
6941 {
6942- gate->a = (seg << 16) | (base & 0xffff);
6943- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
6944+ gate->gate.offset_low = base;
6945+ gate->gate.seg = seg;
6946+ gate->gate.reserved = 0;
6947+ gate->gate.type = type;
6948+ gate->gate.s = 0;
6949+ gate->gate.dpl = dpl;
6950+ gate->gate.p = 1;
6951+ gate->gate.offset_high = base >> 16;
6952 }
6953
6954 #endif
6955@@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(str
6956
6957 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
6958 {
6959+ pax_open_kernel();
6960 memcpy(&idt[entry], gate, sizeof(*gate));
6961+ pax_close_kernel();
6962 }
6963
6964 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
6965 {
6966+ pax_open_kernel();
6967 memcpy(&ldt[entry], desc, 8);
6968+ pax_close_kernel();
6969 }
6970
6971 static inline void
6972@@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struc
6973 default: size = sizeof(*gdt); break;
6974 }
6975
6976+ pax_open_kernel();
6977 memcpy(&gdt[entry], desc, size);
6978+ pax_close_kernel();
6979 }
6980
6981 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
6982@@ -207,7 +216,9 @@ static inline void native_set_ldt(const
6983
6984 static inline void native_load_tr_desc(void)
6985 {
6986+ pax_open_kernel();
6987 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
6988+ pax_close_kernel();
6989 }
6990
6991 static inline void native_load_gdt(const struct desc_ptr *dtr)
6992@@ -244,8 +255,10 @@ static inline void native_load_tls(struc
6993 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
6994 unsigned int i;
6995
6996+ pax_open_kernel();
6997 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
6998 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
6999+ pax_close_kernel();
7000 }
7001
7002 #define _LDT_empty(info) \
7003@@ -307,7 +320,7 @@ static inline void set_desc_limit(struct
7004 desc->limit = (limit >> 16) & 0xf;
7005 }
7006
7007-static inline void _set_gate(int gate, unsigned type, void *addr,
7008+static inline void _set_gate(int gate, unsigned type, const void *addr,
7009 unsigned dpl, unsigned ist, unsigned seg)
7010 {
7011 gate_desc s;
7012@@ -326,7 +339,7 @@ static inline void _set_gate(int gate, u
7013 * Pentium F0 0F bugfix can have resulted in the mapped
7014 * IDT being write-protected.
7015 */
7016-static inline void set_intr_gate(unsigned int n, void *addr)
7017+static inline void set_intr_gate(unsigned int n, const void *addr)
7018 {
7019 BUG_ON((unsigned)n > 0xFF);
7020 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7021@@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsig
7022 /*
7023 * This routine sets up an interrupt gate at directory privilege level 3.
7024 */
7025-static inline void set_system_intr_gate(unsigned int n, void *addr)
7026+static inline void set_system_intr_gate(unsigned int n, const void *addr)
7027 {
7028 BUG_ON((unsigned)n > 0xFF);
7029 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7030 }
7031
7032-static inline void set_system_trap_gate(unsigned int n, void *addr)
7033+static inline void set_system_trap_gate(unsigned int n, const void *addr)
7034 {
7035 BUG_ON((unsigned)n > 0xFF);
7036 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
7037 }
7038
7039-static inline void set_trap_gate(unsigned int n, void *addr)
7040+static inline void set_trap_gate(unsigned int n, const void *addr)
7041 {
7042 BUG_ON((unsigned)n > 0xFF);
7043 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
7044@@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigne
7045 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
7046 {
7047 BUG_ON((unsigned)n > 0xFF);
7048- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
7049+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
7050 }
7051
7052-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
7053+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
7054 {
7055 BUG_ON((unsigned)n > 0xFF);
7056 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
7057 }
7058
7059-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
7060+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
7061 {
7062 BUG_ON((unsigned)n > 0xFF);
7063 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
7064 }
7065
7066+#ifdef CONFIG_X86_32
7067+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
7068+{
7069+ struct desc_struct d;
7070+
7071+ if (likely(limit))
7072+ limit = (limit - 1UL) >> PAGE_SHIFT;
7073+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
7074+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
7075+}
7076+#endif
7077+
7078 #endif /* _ASM_X86_DESC_H */
7079diff -urNp linux-3.0.4/arch/x86/include/asm/e820.h linux-3.0.4/arch/x86/include/asm/e820.h
7080--- linux-3.0.4/arch/x86/include/asm/e820.h 2011-07-21 22:17:23.000000000 -0400
7081+++ linux-3.0.4/arch/x86/include/asm/e820.h 2011-08-23 21:47:55.000000000 -0400
7082@@ -69,7 +69,7 @@ struct e820map {
7083 #define ISA_START_ADDRESS 0xa0000
7084 #define ISA_END_ADDRESS 0x100000
7085
7086-#define BIOS_BEGIN 0x000a0000
7087+#define BIOS_BEGIN 0x000c0000
7088 #define BIOS_END 0x00100000
7089
7090 #define BIOS_ROM_BASE 0xffe00000
7091diff -urNp linux-3.0.4/arch/x86/include/asm/elf.h linux-3.0.4/arch/x86/include/asm/elf.h
7092--- linux-3.0.4/arch/x86/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
7093+++ linux-3.0.4/arch/x86/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
7094@@ -237,7 +237,25 @@ extern int force_personality32;
7095 the loader. We need to make sure that it is out of the way of the program
7096 that it will "exec", and that there is sufficient room for the brk. */
7097
7098+#ifdef CONFIG_PAX_SEGMEXEC
7099+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
7100+#else
7101 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7102+#endif
7103+
7104+#ifdef CONFIG_PAX_ASLR
7105+#ifdef CONFIG_X86_32
7106+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
7107+
7108+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7109+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7110+#else
7111+#define PAX_ELF_ET_DYN_BASE 0x400000UL
7112+
7113+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7114+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7115+#endif
7116+#endif
7117
7118 /* This yields a mask that user programs can use to figure out what
7119 instruction set this CPU supports. This could be done in user space,
7120@@ -290,9 +308,7 @@ do { \
7121
7122 #define ARCH_DLINFO \
7123 do { \
7124- if (vdso_enabled) \
7125- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
7126- (unsigned long)current->mm->context.vdso); \
7127+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
7128 } while (0)
7129
7130 #define AT_SYSINFO 32
7131@@ -303,7 +319,7 @@ do { \
7132
7133 #endif /* !CONFIG_X86_32 */
7134
7135-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
7136+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
7137
7138 #define VDSO_ENTRY \
7139 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
7140@@ -317,7 +333,4 @@ extern int arch_setup_additional_pages(s
7141 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
7142 #define compat_arch_setup_additional_pages syscall32_setup_pages
7143
7144-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7145-#define arch_randomize_brk arch_randomize_brk
7146-
7147 #endif /* _ASM_X86_ELF_H */
7148diff -urNp linux-3.0.4/arch/x86/include/asm/emergency-restart.h linux-3.0.4/arch/x86/include/asm/emergency-restart.h
7149--- linux-3.0.4/arch/x86/include/asm/emergency-restart.h 2011-07-21 22:17:23.000000000 -0400
7150+++ linux-3.0.4/arch/x86/include/asm/emergency-restart.h 2011-08-23 21:47:55.000000000 -0400
7151@@ -15,6 +15,6 @@ enum reboot_type {
7152
7153 extern enum reboot_type reboot_type;
7154
7155-extern void machine_emergency_restart(void);
7156+extern void machine_emergency_restart(void) __noreturn;
7157
7158 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
7159diff -urNp linux-3.0.4/arch/x86/include/asm/futex.h linux-3.0.4/arch/x86/include/asm/futex.h
7160--- linux-3.0.4/arch/x86/include/asm/futex.h 2011-07-21 22:17:23.000000000 -0400
7161+++ linux-3.0.4/arch/x86/include/asm/futex.h 2011-08-23 21:47:55.000000000 -0400
7162@@ -12,16 +12,18 @@
7163 #include <asm/system.h>
7164
7165 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
7166+ typecheck(u32 *, uaddr); \
7167 asm volatile("1:\t" insn "\n" \
7168 "2:\t.section .fixup,\"ax\"\n" \
7169 "3:\tmov\t%3, %1\n" \
7170 "\tjmp\t2b\n" \
7171 "\t.previous\n" \
7172 _ASM_EXTABLE(1b, 3b) \
7173- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7174+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
7175 : "i" (-EFAULT), "0" (oparg), "1" (0))
7176
7177 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
7178+ typecheck(u32 *, uaddr); \
7179 asm volatile("1:\tmovl %2, %0\n" \
7180 "\tmovl\t%0, %3\n" \
7181 "\t" insn "\n" \
7182@@ -34,7 +36,7 @@
7183 _ASM_EXTABLE(1b, 4b) \
7184 _ASM_EXTABLE(2b, 4b) \
7185 : "=&a" (oldval), "=&r" (ret), \
7186- "+m" (*uaddr), "=&r" (tem) \
7187+ "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
7188 : "r" (oparg), "i" (-EFAULT), "1" (0))
7189
7190 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
7191@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
7192
7193 switch (op) {
7194 case FUTEX_OP_SET:
7195- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
7196+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
7197 break;
7198 case FUTEX_OP_ADD:
7199- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
7200+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
7201 uaddr, oparg);
7202 break;
7203 case FUTEX_OP_OR:
7204@@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_i
7205 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
7206 return -EFAULT;
7207
7208- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
7209+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
7210 "2:\t.section .fixup, \"ax\"\n"
7211 "3:\tmov %3, %0\n"
7212 "\tjmp 2b\n"
7213 "\t.previous\n"
7214 _ASM_EXTABLE(1b, 3b)
7215- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
7216+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
7217 : "i" (-EFAULT), "r" (newval), "1" (oldval)
7218 : "memory"
7219 );
7220diff -urNp linux-3.0.4/arch/x86/include/asm/hw_irq.h linux-3.0.4/arch/x86/include/asm/hw_irq.h
7221--- linux-3.0.4/arch/x86/include/asm/hw_irq.h 2011-07-21 22:17:23.000000000 -0400
7222+++ linux-3.0.4/arch/x86/include/asm/hw_irq.h 2011-08-23 21:47:55.000000000 -0400
7223@@ -137,8 +137,8 @@ extern void setup_ioapic_dest(void);
7224 extern void enable_IO_APIC(void);
7225
7226 /* Statistics */
7227-extern atomic_t irq_err_count;
7228-extern atomic_t irq_mis_count;
7229+extern atomic_unchecked_t irq_err_count;
7230+extern atomic_unchecked_t irq_mis_count;
7231
7232 /* EISA */
7233 extern void eisa_set_level_irq(unsigned int irq);
7234diff -urNp linux-3.0.4/arch/x86/include/asm/i387.h linux-3.0.4/arch/x86/include/asm/i387.h
7235--- linux-3.0.4/arch/x86/include/asm/i387.h 2011-07-21 22:17:23.000000000 -0400
7236+++ linux-3.0.4/arch/x86/include/asm/i387.h 2011-08-23 21:47:55.000000000 -0400
7237@@ -92,6 +92,11 @@ static inline int fxrstor_checking(struc
7238 {
7239 int err;
7240
7241+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7242+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7243+ fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
7244+#endif
7245+
7246 /* See comment in fxsave() below. */
7247 #ifdef CONFIG_AS_FXSAVEQ
7248 asm volatile("1: fxrstorq %[fx]\n\t"
7249@@ -121,6 +126,11 @@ static inline int fxsave_user(struct i38
7250 {
7251 int err;
7252
7253+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7254+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7255+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
7256+#endif
7257+
7258 /*
7259 * Clear the bytes not touched by the fxsave and reserved
7260 * for the SW usage.
7261@@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu
7262 #endif /* CONFIG_X86_64 */
7263
7264 /* We need a safe address that is cheap to find and that is already
7265- in L1 during context switch. The best choices are unfortunately
7266- different for UP and SMP */
7267-#ifdef CONFIG_SMP
7268-#define safe_address (__per_cpu_offset[0])
7269-#else
7270-#define safe_address (kstat_cpu(0).cpustat.user)
7271-#endif
7272+ in L1 during context switch. */
7273+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
7274
7275 /*
7276 * These must be called with preempt disabled
7277@@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void
7278 struct thread_info *me = current_thread_info();
7279 preempt_disable();
7280 if (me->status & TS_USEDFPU)
7281- __save_init_fpu(me->task);
7282+ __save_init_fpu(current);
7283 else
7284 clts();
7285 }
7286diff -urNp linux-3.0.4/arch/x86/include/asm/io.h linux-3.0.4/arch/x86/include/asm/io.h
7287--- linux-3.0.4/arch/x86/include/asm/io.h 2011-07-21 22:17:23.000000000 -0400
7288+++ linux-3.0.4/arch/x86/include/asm/io.h 2011-08-23 21:47:55.000000000 -0400
7289@@ -196,6 +196,17 @@ extern void set_iounmap_nonlazy(void);
7290
7291 #include <linux/vmalloc.h>
7292
7293+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
7294+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
7295+{
7296+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7297+}
7298+
7299+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
7300+{
7301+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7302+}
7303+
7304 /*
7305 * Convert a virtual cached pointer to an uncached pointer
7306 */
7307diff -urNp linux-3.0.4/arch/x86/include/asm/irqflags.h linux-3.0.4/arch/x86/include/asm/irqflags.h
7308--- linux-3.0.4/arch/x86/include/asm/irqflags.h 2011-07-21 22:17:23.000000000 -0400
7309+++ linux-3.0.4/arch/x86/include/asm/irqflags.h 2011-08-23 21:47:55.000000000 -0400
7310@@ -140,6 +140,11 @@ static inline unsigned long arch_local_i
7311 sti; \
7312 sysexit
7313
7314+#define GET_CR0_INTO_RDI mov %cr0, %rdi
7315+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
7316+#define GET_CR3_INTO_RDI mov %cr3, %rdi
7317+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
7318+
7319 #else
7320 #define INTERRUPT_RETURN iret
7321 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
7322diff -urNp linux-3.0.4/arch/x86/include/asm/kprobes.h linux-3.0.4/arch/x86/include/asm/kprobes.h
7323--- linux-3.0.4/arch/x86/include/asm/kprobes.h 2011-07-21 22:17:23.000000000 -0400
7324+++ linux-3.0.4/arch/x86/include/asm/kprobes.h 2011-08-23 21:47:55.000000000 -0400
7325@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
7326 #define RELATIVEJUMP_SIZE 5
7327 #define RELATIVECALL_OPCODE 0xe8
7328 #define RELATIVE_ADDR_SIZE 4
7329-#define MAX_STACK_SIZE 64
7330-#define MIN_STACK_SIZE(ADDR) \
7331- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
7332- THREAD_SIZE - (unsigned long)(ADDR))) \
7333- ? (MAX_STACK_SIZE) \
7334- : (((unsigned long)current_thread_info()) + \
7335- THREAD_SIZE - (unsigned long)(ADDR)))
7336+#define MAX_STACK_SIZE 64UL
7337+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
7338
7339 #define flush_insn_slot(p) do { } while (0)
7340
7341diff -urNp linux-3.0.4/arch/x86/include/asm/kvm_host.h linux-3.0.4/arch/x86/include/asm/kvm_host.h
7342--- linux-3.0.4/arch/x86/include/asm/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
7343+++ linux-3.0.4/arch/x86/include/asm/kvm_host.h 2011-08-26 19:49:56.000000000 -0400
7344@@ -441,7 +441,7 @@ struct kvm_arch {
7345 unsigned int n_used_mmu_pages;
7346 unsigned int n_requested_mmu_pages;
7347 unsigned int n_max_mmu_pages;
7348- atomic_t invlpg_counter;
7349+ atomic_unchecked_t invlpg_counter;
7350 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
7351 /*
7352 * Hash table of struct kvm_mmu_page.
7353@@ -619,7 +619,7 @@ struct kvm_x86_ops {
7354 enum x86_intercept_stage stage);
7355
7356 const struct trace_print_flags *exit_reasons_str;
7357-};
7358+} __do_const;
7359
7360 struct kvm_arch_async_pf {
7361 u32 token;
7362diff -urNp linux-3.0.4/arch/x86/include/asm/local.h linux-3.0.4/arch/x86/include/asm/local.h
7363--- linux-3.0.4/arch/x86/include/asm/local.h 2011-07-21 22:17:23.000000000 -0400
7364+++ linux-3.0.4/arch/x86/include/asm/local.h 2011-08-23 21:47:55.000000000 -0400
7365@@ -18,26 +18,58 @@ typedef struct {
7366
7367 static inline void local_inc(local_t *l)
7368 {
7369- asm volatile(_ASM_INC "%0"
7370+ asm volatile(_ASM_INC "%0\n"
7371+
7372+#ifdef CONFIG_PAX_REFCOUNT
7373+ "jno 0f\n"
7374+ _ASM_DEC "%0\n"
7375+ "int $4\n0:\n"
7376+ _ASM_EXTABLE(0b, 0b)
7377+#endif
7378+
7379 : "+m" (l->a.counter));
7380 }
7381
7382 static inline void local_dec(local_t *l)
7383 {
7384- asm volatile(_ASM_DEC "%0"
7385+ asm volatile(_ASM_DEC "%0\n"
7386+
7387+#ifdef CONFIG_PAX_REFCOUNT
7388+ "jno 0f\n"
7389+ _ASM_INC "%0\n"
7390+ "int $4\n0:\n"
7391+ _ASM_EXTABLE(0b, 0b)
7392+#endif
7393+
7394 : "+m" (l->a.counter));
7395 }
7396
7397 static inline void local_add(long i, local_t *l)
7398 {
7399- asm volatile(_ASM_ADD "%1,%0"
7400+ asm volatile(_ASM_ADD "%1,%0\n"
7401+
7402+#ifdef CONFIG_PAX_REFCOUNT
7403+ "jno 0f\n"
7404+ _ASM_SUB "%1,%0\n"
7405+ "int $4\n0:\n"
7406+ _ASM_EXTABLE(0b, 0b)
7407+#endif
7408+
7409 : "+m" (l->a.counter)
7410 : "ir" (i));
7411 }
7412
7413 static inline void local_sub(long i, local_t *l)
7414 {
7415- asm volatile(_ASM_SUB "%1,%0"
7416+ asm volatile(_ASM_SUB "%1,%0\n"
7417+
7418+#ifdef CONFIG_PAX_REFCOUNT
7419+ "jno 0f\n"
7420+ _ASM_ADD "%1,%0\n"
7421+ "int $4\n0:\n"
7422+ _ASM_EXTABLE(0b, 0b)
7423+#endif
7424+
7425 : "+m" (l->a.counter)
7426 : "ir" (i));
7427 }
7428@@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
7429 {
7430 unsigned char c;
7431
7432- asm volatile(_ASM_SUB "%2,%0; sete %1"
7433+ asm volatile(_ASM_SUB "%2,%0\n"
7434+
7435+#ifdef CONFIG_PAX_REFCOUNT
7436+ "jno 0f\n"
7437+ _ASM_ADD "%2,%0\n"
7438+ "int $4\n0:\n"
7439+ _ASM_EXTABLE(0b, 0b)
7440+#endif
7441+
7442+ "sete %1\n"
7443 : "+m" (l->a.counter), "=qm" (c)
7444 : "ir" (i) : "memory");
7445 return c;
7446@@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
7447 {
7448 unsigned char c;
7449
7450- asm volatile(_ASM_DEC "%0; sete %1"
7451+ asm volatile(_ASM_DEC "%0\n"
7452+
7453+#ifdef CONFIG_PAX_REFCOUNT
7454+ "jno 0f\n"
7455+ _ASM_INC "%0\n"
7456+ "int $4\n0:\n"
7457+ _ASM_EXTABLE(0b, 0b)
7458+#endif
7459+
7460+ "sete %1\n"
7461 : "+m" (l->a.counter), "=qm" (c)
7462 : : "memory");
7463 return c != 0;
7464@@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
7465 {
7466 unsigned char c;
7467
7468- asm volatile(_ASM_INC "%0; sete %1"
7469+ asm volatile(_ASM_INC "%0\n"
7470+
7471+#ifdef CONFIG_PAX_REFCOUNT
7472+ "jno 0f\n"
7473+ _ASM_DEC "%0\n"
7474+ "int $4\n0:\n"
7475+ _ASM_EXTABLE(0b, 0b)
7476+#endif
7477+
7478+ "sete %1\n"
7479 : "+m" (l->a.counter), "=qm" (c)
7480 : : "memory");
7481 return c != 0;
7482@@ -110,7 +169,16 @@ static inline int local_add_negative(lon
7483 {
7484 unsigned char c;
7485
7486- asm volatile(_ASM_ADD "%2,%0; sets %1"
7487+ asm volatile(_ASM_ADD "%2,%0\n"
7488+
7489+#ifdef CONFIG_PAX_REFCOUNT
7490+ "jno 0f\n"
7491+ _ASM_SUB "%2,%0\n"
7492+ "int $4\n0:\n"
7493+ _ASM_EXTABLE(0b, 0b)
7494+#endif
7495+
7496+ "sets %1\n"
7497 : "+m" (l->a.counter), "=qm" (c)
7498 : "ir" (i) : "memory");
7499 return c;
7500@@ -133,7 +201,15 @@ static inline long local_add_return(long
7501 #endif
7502 /* Modern 486+ processor */
7503 __i = i;
7504- asm volatile(_ASM_XADD "%0, %1;"
7505+ asm volatile(_ASM_XADD "%0, %1\n"
7506+
7507+#ifdef CONFIG_PAX_REFCOUNT
7508+ "jno 0f\n"
7509+ _ASM_MOV "%0,%1\n"
7510+ "int $4\n0:\n"
7511+ _ASM_EXTABLE(0b, 0b)
7512+#endif
7513+
7514 : "+r" (i), "+m" (l->a.counter)
7515 : : "memory");
7516 return i + __i;
7517diff -urNp linux-3.0.4/arch/x86/include/asm/mman.h linux-3.0.4/arch/x86/include/asm/mman.h
7518--- linux-3.0.4/arch/x86/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
7519+++ linux-3.0.4/arch/x86/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
7520@@ -5,4 +5,14 @@
7521
7522 #include <asm-generic/mman.h>
7523
7524+#ifdef __KERNEL__
7525+#ifndef __ASSEMBLY__
7526+#ifdef CONFIG_X86_32
7527+#define arch_mmap_check i386_mmap_check
7528+int i386_mmap_check(unsigned long addr, unsigned long len,
7529+ unsigned long flags);
7530+#endif
7531+#endif
7532+#endif
7533+
7534 #endif /* _ASM_X86_MMAN_H */
7535diff -urNp linux-3.0.4/arch/x86/include/asm/mmu_context.h linux-3.0.4/arch/x86/include/asm/mmu_context.h
7536--- linux-3.0.4/arch/x86/include/asm/mmu_context.h 2011-07-21 22:17:23.000000000 -0400
7537+++ linux-3.0.4/arch/x86/include/asm/mmu_context.h 2011-08-23 21:48:14.000000000 -0400
7538@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *m
7539
7540 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7541 {
7542+
7543+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7544+ unsigned int i;
7545+ pgd_t *pgd;
7546+
7547+ pax_open_kernel();
7548+ pgd = get_cpu_pgd(smp_processor_id());
7549+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
7550+ set_pgd_batched(pgd+i, native_make_pgd(0));
7551+ pax_close_kernel();
7552+#endif
7553+
7554 #ifdef CONFIG_SMP
7555 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
7556 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
7557@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_s
7558 struct task_struct *tsk)
7559 {
7560 unsigned cpu = smp_processor_id();
7561+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7562+ int tlbstate = TLBSTATE_OK;
7563+#endif
7564
7565 if (likely(prev != next)) {
7566 #ifdef CONFIG_SMP
7567+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7568+ tlbstate = percpu_read(cpu_tlbstate.state);
7569+#endif
7570 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7571 percpu_write(cpu_tlbstate.active_mm, next);
7572 #endif
7573 cpumask_set_cpu(cpu, mm_cpumask(next));
7574
7575 /* Re-load page tables */
7576+#ifdef CONFIG_PAX_PER_CPU_PGD
7577+ pax_open_kernel();
7578+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7579+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7580+ pax_close_kernel();
7581+ load_cr3(get_cpu_pgd(cpu));
7582+#else
7583 load_cr3(next->pgd);
7584+#endif
7585
7586 /* stop flush ipis for the previous mm */
7587 cpumask_clear_cpu(cpu, mm_cpumask(prev));
7588@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_s
7589 */
7590 if (unlikely(prev->context.ldt != next->context.ldt))
7591 load_LDT_nolock(&next->context);
7592- }
7593+
7594+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7595+ if (!(__supported_pte_mask & _PAGE_NX)) {
7596+ smp_mb__before_clear_bit();
7597+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
7598+ smp_mb__after_clear_bit();
7599+ cpu_set(cpu, next->context.cpu_user_cs_mask);
7600+ }
7601+#endif
7602+
7603+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7604+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
7605+ prev->context.user_cs_limit != next->context.user_cs_limit))
7606+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7607 #ifdef CONFIG_SMP
7608+ else if (unlikely(tlbstate != TLBSTATE_OK))
7609+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7610+#endif
7611+#endif
7612+
7613+ }
7614 else {
7615+
7616+#ifdef CONFIG_PAX_PER_CPU_PGD
7617+ pax_open_kernel();
7618+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7619+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7620+ pax_close_kernel();
7621+ load_cr3(get_cpu_pgd(cpu));
7622+#endif
7623+
7624+#ifdef CONFIG_SMP
7625 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7626 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
7627
7628@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_s
7629 * tlb flush IPI delivery. We must reload CR3
7630 * to make sure to use no freed page tables.
7631 */
7632+
7633+#ifndef CONFIG_PAX_PER_CPU_PGD
7634 load_cr3(next->pgd);
7635+#endif
7636+
7637 load_LDT_nolock(&next->context);
7638+
7639+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
7640+ if (!(__supported_pte_mask & _PAGE_NX))
7641+ cpu_set(cpu, next->context.cpu_user_cs_mask);
7642+#endif
7643+
7644+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7645+#ifdef CONFIG_PAX_PAGEEXEC
7646+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
7647+#endif
7648+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7649+#endif
7650+
7651 }
7652- }
7653 #endif
7654+ }
7655 }
7656
7657 #define activate_mm(prev, next) \
7658diff -urNp linux-3.0.4/arch/x86/include/asm/mmu.h linux-3.0.4/arch/x86/include/asm/mmu.h
7659--- linux-3.0.4/arch/x86/include/asm/mmu.h 2011-07-21 22:17:23.000000000 -0400
7660+++ linux-3.0.4/arch/x86/include/asm/mmu.h 2011-08-23 21:47:55.000000000 -0400
7661@@ -9,7 +9,7 @@
7662 * we put the segment information here.
7663 */
7664 typedef struct {
7665- void *ldt;
7666+ struct desc_struct *ldt;
7667 int size;
7668
7669 #ifdef CONFIG_X86_64
7670@@ -18,7 +18,19 @@ typedef struct {
7671 #endif
7672
7673 struct mutex lock;
7674- void *vdso;
7675+ unsigned long vdso;
7676+
7677+#ifdef CONFIG_X86_32
7678+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
7679+ unsigned long user_cs_base;
7680+ unsigned long user_cs_limit;
7681+
7682+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7683+ cpumask_t cpu_user_cs_mask;
7684+#endif
7685+
7686+#endif
7687+#endif
7688 } mm_context_t;
7689
7690 #ifdef CONFIG_SMP
7691diff -urNp linux-3.0.4/arch/x86/include/asm/module.h linux-3.0.4/arch/x86/include/asm/module.h
7692--- linux-3.0.4/arch/x86/include/asm/module.h 2011-07-21 22:17:23.000000000 -0400
7693+++ linux-3.0.4/arch/x86/include/asm/module.h 2011-08-23 21:48:14.000000000 -0400
7694@@ -5,6 +5,7 @@
7695
7696 #ifdef CONFIG_X86_64
7697 /* X86_64 does not define MODULE_PROC_FAMILY */
7698+#define MODULE_PROC_FAMILY ""
7699 #elif defined CONFIG_M386
7700 #define MODULE_PROC_FAMILY "386 "
7701 #elif defined CONFIG_M486
7702@@ -59,8 +60,30 @@
7703 #error unknown processor family
7704 #endif
7705
7706-#ifdef CONFIG_X86_32
7707-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
7708+#ifdef CONFIG_PAX_MEMORY_UDEREF
7709+#define MODULE_PAX_UDEREF "UDEREF "
7710+#else
7711+#define MODULE_PAX_UDEREF ""
7712+#endif
7713+
7714+#ifdef CONFIG_PAX_KERNEXEC
7715+#define MODULE_PAX_KERNEXEC "KERNEXEC "
7716+#else
7717+#define MODULE_PAX_KERNEXEC ""
7718 #endif
7719
7720+#ifdef CONFIG_PAX_REFCOUNT
7721+#define MODULE_PAX_REFCOUNT "REFCOUNT "
7722+#else
7723+#define MODULE_PAX_REFCOUNT ""
7724+#endif
7725+
7726+#ifdef CONFIG_GRKERNSEC
7727+#define MODULE_GRSEC "GRSECURITY "
7728+#else
7729+#define MODULE_GRSEC ""
7730+#endif
7731+
7732+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
7733+
7734 #endif /* _ASM_X86_MODULE_H */
7735diff -urNp linux-3.0.4/arch/x86/include/asm/page_64_types.h linux-3.0.4/arch/x86/include/asm/page_64_types.h
7736--- linux-3.0.4/arch/x86/include/asm/page_64_types.h 2011-07-21 22:17:23.000000000 -0400
7737+++ linux-3.0.4/arch/x86/include/asm/page_64_types.h 2011-08-23 21:47:55.000000000 -0400
7738@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
7739
7740 /* duplicated to the one in bootmem.h */
7741 extern unsigned long max_pfn;
7742-extern unsigned long phys_base;
7743+extern const unsigned long phys_base;
7744
7745 extern unsigned long __phys_addr(unsigned long);
7746 #define __phys_reloc_hide(x) (x)
7747diff -urNp linux-3.0.4/arch/x86/include/asm/paravirt.h linux-3.0.4/arch/x86/include/asm/paravirt.h
7748--- linux-3.0.4/arch/x86/include/asm/paravirt.h 2011-07-21 22:17:23.000000000 -0400
7749+++ linux-3.0.4/arch/x86/include/asm/paravirt.h 2011-08-23 21:47:55.000000000 -0400
7750@@ -658,6 +658,18 @@ static inline void set_pgd(pgd_t *pgdp,
7751 val);
7752 }
7753
7754+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
7755+{
7756+ pgdval_t val = native_pgd_val(pgd);
7757+
7758+ if (sizeof(pgdval_t) > sizeof(long))
7759+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
7760+ val, (u64)val >> 32);
7761+ else
7762+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
7763+ val);
7764+}
7765+
7766 static inline void pgd_clear(pgd_t *pgdp)
7767 {
7768 set_pgd(pgdp, __pgd(0));
7769@@ -739,6 +751,21 @@ static inline void __set_fixmap(unsigned
7770 pv_mmu_ops.set_fixmap(idx, phys, flags);
7771 }
7772
7773+#ifdef CONFIG_PAX_KERNEXEC
7774+static inline unsigned long pax_open_kernel(void)
7775+{
7776+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
7777+}
7778+
7779+static inline unsigned long pax_close_kernel(void)
7780+{
7781+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
7782+}
7783+#else
7784+static inline unsigned long pax_open_kernel(void) { return 0; }
7785+static inline unsigned long pax_close_kernel(void) { return 0; }
7786+#endif
7787+
7788 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
7789
7790 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
7791@@ -955,7 +982,7 @@ extern void default_banner(void);
7792
7793 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
7794 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
7795-#define PARA_INDIRECT(addr) *%cs:addr
7796+#define PARA_INDIRECT(addr) *%ss:addr
7797 #endif
7798
7799 #define INTERRUPT_RETURN \
7800@@ -1032,6 +1059,21 @@ extern void default_banner(void);
7801 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
7802 CLBR_NONE, \
7803 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
7804+
7805+#define GET_CR0_INTO_RDI \
7806+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
7807+ mov %rax,%rdi
7808+
7809+#define SET_RDI_INTO_CR0 \
7810+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
7811+
7812+#define GET_CR3_INTO_RDI \
7813+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
7814+ mov %rax,%rdi
7815+
7816+#define SET_RDI_INTO_CR3 \
7817+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
7818+
7819 #endif /* CONFIG_X86_32 */
7820
7821 #endif /* __ASSEMBLY__ */
7822diff -urNp linux-3.0.4/arch/x86/include/asm/paravirt_types.h linux-3.0.4/arch/x86/include/asm/paravirt_types.h
7823--- linux-3.0.4/arch/x86/include/asm/paravirt_types.h 2011-07-21 22:17:23.000000000 -0400
7824+++ linux-3.0.4/arch/x86/include/asm/paravirt_types.h 2011-08-23 21:47:55.000000000 -0400
7825@@ -78,19 +78,19 @@ struct pv_init_ops {
7826 */
7827 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
7828 unsigned long addr, unsigned len);
7829-};
7830+} __no_const;
7831
7832
7833 struct pv_lazy_ops {
7834 /* Set deferred update mode, used for batching operations. */
7835 void (*enter)(void);
7836 void (*leave)(void);
7837-};
7838+} __no_const;
7839
7840 struct pv_time_ops {
7841 unsigned long long (*sched_clock)(void);
7842 unsigned long (*get_tsc_khz)(void);
7843-};
7844+} __no_const;
7845
7846 struct pv_cpu_ops {
7847 /* hooks for various privileged instructions */
7848@@ -186,7 +186,7 @@ struct pv_cpu_ops {
7849
7850 void (*start_context_switch)(struct task_struct *prev);
7851 void (*end_context_switch)(struct task_struct *next);
7852-};
7853+} __no_const;
7854
7855 struct pv_irq_ops {
7856 /*
7857@@ -217,7 +217,7 @@ struct pv_apic_ops {
7858 unsigned long start_eip,
7859 unsigned long start_esp);
7860 #endif
7861-};
7862+} __no_const;
7863
7864 struct pv_mmu_ops {
7865 unsigned long (*read_cr2)(void);
7866@@ -306,6 +306,7 @@ struct pv_mmu_ops {
7867 struct paravirt_callee_save make_pud;
7868
7869 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
7870+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
7871 #endif /* PAGETABLE_LEVELS == 4 */
7872 #endif /* PAGETABLE_LEVELS >= 3 */
7873
7874@@ -317,6 +318,12 @@ struct pv_mmu_ops {
7875 an mfn. We can tell which is which from the index. */
7876 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
7877 phys_addr_t phys, pgprot_t flags);
7878+
7879+#ifdef CONFIG_PAX_KERNEXEC
7880+ unsigned long (*pax_open_kernel)(void);
7881+ unsigned long (*pax_close_kernel)(void);
7882+#endif
7883+
7884 };
7885
7886 struct arch_spinlock;
7887@@ -327,7 +334,7 @@ struct pv_lock_ops {
7888 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
7889 int (*spin_trylock)(struct arch_spinlock *lock);
7890 void (*spin_unlock)(struct arch_spinlock *lock);
7891-};
7892+} __no_const;
7893
7894 /* This contains all the paravirt structures: we get a convenient
7895 * number for each function using the offset which we use to indicate
7896diff -urNp linux-3.0.4/arch/x86/include/asm/pgalloc.h linux-3.0.4/arch/x86/include/asm/pgalloc.h
7897--- linux-3.0.4/arch/x86/include/asm/pgalloc.h 2011-07-21 22:17:23.000000000 -0400
7898+++ linux-3.0.4/arch/x86/include/asm/pgalloc.h 2011-08-23 21:47:55.000000000 -0400
7899@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
7900 pmd_t *pmd, pte_t *pte)
7901 {
7902 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7903+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
7904+}
7905+
7906+static inline void pmd_populate_user(struct mm_struct *mm,
7907+ pmd_t *pmd, pte_t *pte)
7908+{
7909+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7910 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
7911 }
7912
7913diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable-2level.h linux-3.0.4/arch/x86/include/asm/pgtable-2level.h
7914--- linux-3.0.4/arch/x86/include/asm/pgtable-2level.h 2011-07-21 22:17:23.000000000 -0400
7915+++ linux-3.0.4/arch/x86/include/asm/pgtable-2level.h 2011-08-23 21:47:55.000000000 -0400
7916@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
7917
7918 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
7919 {
7920+ pax_open_kernel();
7921 *pmdp = pmd;
7922+ pax_close_kernel();
7923 }
7924
7925 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
7926diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_32.h linux-3.0.4/arch/x86/include/asm/pgtable_32.h
7927--- linux-3.0.4/arch/x86/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
7928+++ linux-3.0.4/arch/x86/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
7929@@ -25,9 +25,6 @@
7930 struct mm_struct;
7931 struct vm_area_struct;
7932
7933-extern pgd_t swapper_pg_dir[1024];
7934-extern pgd_t initial_page_table[1024];
7935-
7936 static inline void pgtable_cache_init(void) { }
7937 static inline void check_pgt_cache(void) { }
7938 void paging_init(void);
7939@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
7940 # include <asm/pgtable-2level.h>
7941 #endif
7942
7943+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
7944+extern pgd_t initial_page_table[PTRS_PER_PGD];
7945+#ifdef CONFIG_X86_PAE
7946+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
7947+#endif
7948+
7949 #if defined(CONFIG_HIGHPTE)
7950 #define pte_offset_map(dir, address) \
7951 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
7952@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u
7953 /* Clear a kernel PTE and flush it from the TLB */
7954 #define kpte_clear_flush(ptep, vaddr) \
7955 do { \
7956+ pax_open_kernel(); \
7957 pte_clear(&init_mm, (vaddr), (ptep)); \
7958+ pax_close_kernel(); \
7959 __flush_tlb_one((vaddr)); \
7960 } while (0)
7961
7962@@ -74,6 +79,9 @@ do { \
7963
7964 #endif /* !__ASSEMBLY__ */
7965
7966+#define HAVE_ARCH_UNMAPPED_AREA
7967+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
7968+
7969 /*
7970 * kern_addr_valid() is (1) for FLATMEM and (0) for
7971 * SPARSEMEM and DISCONTIGMEM
7972diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h
7973--- linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h 2011-07-21 22:17:23.000000000 -0400
7974+++ linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h 2011-08-23 21:47:55.000000000 -0400
7975@@ -8,7 +8,7 @@
7976 */
7977 #ifdef CONFIG_X86_PAE
7978 # include <asm/pgtable-3level_types.h>
7979-# define PMD_SIZE (1UL << PMD_SHIFT)
7980+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
7981 # define PMD_MASK (~(PMD_SIZE - 1))
7982 #else
7983 # include <asm/pgtable-2level_types.h>
7984@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
7985 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
7986 #endif
7987
7988+#ifdef CONFIG_PAX_KERNEXEC
7989+#ifndef __ASSEMBLY__
7990+extern unsigned char MODULES_EXEC_VADDR[];
7991+extern unsigned char MODULES_EXEC_END[];
7992+#endif
7993+#include <asm/boot.h>
7994+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
7995+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
7996+#else
7997+#define ktla_ktva(addr) (addr)
7998+#define ktva_ktla(addr) (addr)
7999+#endif
8000+
8001 #define MODULES_VADDR VMALLOC_START
8002 #define MODULES_END VMALLOC_END
8003 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
8004diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable-3level.h linux-3.0.4/arch/x86/include/asm/pgtable-3level.h
8005--- linux-3.0.4/arch/x86/include/asm/pgtable-3level.h 2011-07-21 22:17:23.000000000 -0400
8006+++ linux-3.0.4/arch/x86/include/asm/pgtable-3level.h 2011-08-23 21:47:55.000000000 -0400
8007@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
8008
8009 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8010 {
8011+ pax_open_kernel();
8012 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
8013+ pax_close_kernel();
8014 }
8015
8016 static inline void native_set_pud(pud_t *pudp, pud_t pud)
8017 {
8018+ pax_open_kernel();
8019 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
8020+ pax_close_kernel();
8021 }
8022
8023 /*
8024diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_64.h linux-3.0.4/arch/x86/include/asm/pgtable_64.h
8025--- linux-3.0.4/arch/x86/include/asm/pgtable_64.h 2011-07-21 22:17:23.000000000 -0400
8026+++ linux-3.0.4/arch/x86/include/asm/pgtable_64.h 2011-08-23 21:47:55.000000000 -0400
8027@@ -16,10 +16,13 @@
8028
8029 extern pud_t level3_kernel_pgt[512];
8030 extern pud_t level3_ident_pgt[512];
8031+extern pud_t level3_vmalloc_pgt[512];
8032+extern pud_t level3_vmemmap_pgt[512];
8033+extern pud_t level2_vmemmap_pgt[512];
8034 extern pmd_t level2_kernel_pgt[512];
8035 extern pmd_t level2_fixmap_pgt[512];
8036-extern pmd_t level2_ident_pgt[512];
8037-extern pgd_t init_level4_pgt[];
8038+extern pmd_t level2_ident_pgt[512*2];
8039+extern pgd_t init_level4_pgt[512];
8040
8041 #define swapper_pg_dir init_level4_pgt
8042
8043@@ -61,7 +64,9 @@ static inline void native_set_pte_atomic
8044
8045 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8046 {
8047+ pax_open_kernel();
8048 *pmdp = pmd;
8049+ pax_close_kernel();
8050 }
8051
8052 static inline void native_pmd_clear(pmd_t *pmd)
8053@@ -107,6 +112,13 @@ static inline void native_pud_clear(pud_
8054
8055 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
8056 {
8057+ pax_open_kernel();
8058+ *pgdp = pgd;
8059+ pax_close_kernel();
8060+}
8061+
8062+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
8063+{
8064 *pgdp = pgd;
8065 }
8066
8067diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h
8068--- linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h 2011-07-21 22:17:23.000000000 -0400
8069+++ linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h 2011-08-23 21:47:55.000000000 -0400
8070@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
8071 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
8072 #define MODULES_END _AC(0xffffffffff000000, UL)
8073 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
8074+#define MODULES_EXEC_VADDR MODULES_VADDR
8075+#define MODULES_EXEC_END MODULES_END
8076+
8077+#define ktla_ktva(addr) (addr)
8078+#define ktva_ktla(addr) (addr)
8079
8080 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
8081diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable.h linux-3.0.4/arch/x86/include/asm/pgtable.h
8082--- linux-3.0.4/arch/x86/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
8083+++ linux-3.0.4/arch/x86/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
8084@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm
8085
8086 #ifndef __PAGETABLE_PUD_FOLDED
8087 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
8088+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
8089 #define pgd_clear(pgd) native_pgd_clear(pgd)
8090 #endif
8091
8092@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm
8093
8094 #define arch_end_context_switch(prev) do {} while(0)
8095
8096+#define pax_open_kernel() native_pax_open_kernel()
8097+#define pax_close_kernel() native_pax_close_kernel()
8098 #endif /* CONFIG_PARAVIRT */
8099
8100+#define __HAVE_ARCH_PAX_OPEN_KERNEL
8101+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
8102+
8103+#ifdef CONFIG_PAX_KERNEXEC
8104+static inline unsigned long native_pax_open_kernel(void)
8105+{
8106+ unsigned long cr0;
8107+
8108+ preempt_disable();
8109+ barrier();
8110+ cr0 = read_cr0() ^ X86_CR0_WP;
8111+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
8112+ write_cr0(cr0);
8113+ return cr0 ^ X86_CR0_WP;
8114+}
8115+
8116+static inline unsigned long native_pax_close_kernel(void)
8117+{
8118+ unsigned long cr0;
8119+
8120+ cr0 = read_cr0() ^ X86_CR0_WP;
8121+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
8122+ write_cr0(cr0);
8123+ barrier();
8124+ preempt_enable_no_resched();
8125+ return cr0 ^ X86_CR0_WP;
8126+}
8127+#else
8128+static inline unsigned long native_pax_open_kernel(void) { return 0; }
8129+static inline unsigned long native_pax_close_kernel(void) { return 0; }
8130+#endif
8131+
8132 /*
8133 * The following only work if pte_present() is true.
8134 * Undefined behaviour if not..
8135 */
8136+static inline int pte_user(pte_t pte)
8137+{
8138+ return pte_val(pte) & _PAGE_USER;
8139+}
8140+
8141 static inline int pte_dirty(pte_t pte)
8142 {
8143 return pte_flags(pte) & _PAGE_DIRTY;
8144@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t
8145 return pte_clear_flags(pte, _PAGE_RW);
8146 }
8147
8148+static inline pte_t pte_mkread(pte_t pte)
8149+{
8150+ return __pte(pte_val(pte) | _PAGE_USER);
8151+}
8152+
8153 static inline pte_t pte_mkexec(pte_t pte)
8154 {
8155- return pte_clear_flags(pte, _PAGE_NX);
8156+#ifdef CONFIG_X86_PAE
8157+ if (__supported_pte_mask & _PAGE_NX)
8158+ return pte_clear_flags(pte, _PAGE_NX);
8159+ else
8160+#endif
8161+ return pte_set_flags(pte, _PAGE_USER);
8162+}
8163+
8164+static inline pte_t pte_exprotect(pte_t pte)
8165+{
8166+#ifdef CONFIG_X86_PAE
8167+ if (__supported_pte_mask & _PAGE_NX)
8168+ return pte_set_flags(pte, _PAGE_NX);
8169+ else
8170+#endif
8171+ return pte_clear_flags(pte, _PAGE_USER);
8172 }
8173
8174 static inline pte_t pte_mkdirty(pte_t pte)
8175@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long
8176 #endif
8177
8178 #ifndef __ASSEMBLY__
8179+
8180+#ifdef CONFIG_PAX_PER_CPU_PGD
8181+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
8182+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
8183+{
8184+ return cpu_pgd[cpu];
8185+}
8186+#endif
8187+
8188 #include <linux/mm_types.h>
8189
8190 static inline int pte_none(pte_t pte)
8191@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *p
8192
8193 static inline int pgd_bad(pgd_t pgd)
8194 {
8195- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
8196+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
8197 }
8198
8199 static inline int pgd_none(pgd_t pgd)
8200@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
8201 * pgd_offset() returns a (pgd_t *)
8202 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
8203 */
8204-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
8205+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
8206+
8207+#ifdef CONFIG_PAX_PER_CPU_PGD
8208+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
8209+#endif
8210+
8211 /*
8212 * a shortcut which implies the use of the kernel's pgd, instead
8213 * of a process's
8214@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
8215 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
8216 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
8217
8218+#ifdef CONFIG_X86_32
8219+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
8220+#else
8221+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
8222+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
8223+
8224+#ifdef CONFIG_PAX_MEMORY_UDEREF
8225+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
8226+#else
8227+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
8228+#endif
8229+
8230+#endif
8231+
8232 #ifndef __ASSEMBLY__
8233
8234 extern int direct_gbpages;
8235@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(st
8236 * dst and src can be on the same page, but the range must not overlap,
8237 * and must not cross a page boundary.
8238 */
8239-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
8240+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
8241 {
8242- memcpy(dst, src, count * sizeof(pgd_t));
8243+ pax_open_kernel();
8244+ while (count--)
8245+ *dst++ = *src++;
8246+ pax_close_kernel();
8247 }
8248
8249+#ifdef CONFIG_PAX_PER_CPU_PGD
8250+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8251+#endif
8252+
8253+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8254+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8255+#else
8256+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
8257+#endif
8258
8259 #include <asm-generic/pgtable.h>
8260 #endif /* __ASSEMBLY__ */
8261diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_types.h linux-3.0.4/arch/x86/include/asm/pgtable_types.h
8262--- linux-3.0.4/arch/x86/include/asm/pgtable_types.h 2011-07-21 22:17:23.000000000 -0400
8263+++ linux-3.0.4/arch/x86/include/asm/pgtable_types.h 2011-08-23 21:47:55.000000000 -0400
8264@@ -16,13 +16,12 @@
8265 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
8266 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
8267 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
8268-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
8269+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
8270 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
8271 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
8272 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
8273-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
8274-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
8275-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
8276+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
8277+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
8278 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
8279
8280 /* If _PAGE_BIT_PRESENT is clear, we use these: */
8281@@ -40,7 +39,6 @@
8282 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
8283 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
8284 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
8285-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
8286 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
8287 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
8288 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
8289@@ -57,8 +55,10 @@
8290
8291 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
8292 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
8293-#else
8294+#elif defined(CONFIG_KMEMCHECK)
8295 #define _PAGE_NX (_AT(pteval_t, 0))
8296+#else
8297+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
8298 #endif
8299
8300 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
8301@@ -96,6 +96,9 @@
8302 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
8303 _PAGE_ACCESSED)
8304
8305+#define PAGE_READONLY_NOEXEC PAGE_READONLY
8306+#define PAGE_SHARED_NOEXEC PAGE_SHARED
8307+
8308 #define __PAGE_KERNEL_EXEC \
8309 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
8310 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
8311@@ -106,8 +109,8 @@
8312 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
8313 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
8314 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
8315-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
8316-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
8317+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
8318+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
8319 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
8320 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
8321 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
8322@@ -166,8 +169,8 @@
8323 * bits are combined, this will alow user to access the high address mapped
8324 * VDSO in the presence of CONFIG_COMPAT_VDSO
8325 */
8326-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
8327-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
8328+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8329+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8330 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
8331 #endif
8332
8333@@ -205,7 +208,17 @@ static inline pgdval_t pgd_flags(pgd_t p
8334 {
8335 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
8336 }
8337+#endif
8338
8339+#if PAGETABLE_LEVELS == 3
8340+#include <asm-generic/pgtable-nopud.h>
8341+#endif
8342+
8343+#if PAGETABLE_LEVELS == 2
8344+#include <asm-generic/pgtable-nopmd.h>
8345+#endif
8346+
8347+#ifndef __ASSEMBLY__
8348 #if PAGETABLE_LEVELS > 3
8349 typedef struct { pudval_t pud; } pud_t;
8350
8351@@ -219,8 +232,6 @@ static inline pudval_t native_pud_val(pu
8352 return pud.pud;
8353 }
8354 #else
8355-#include <asm-generic/pgtable-nopud.h>
8356-
8357 static inline pudval_t native_pud_val(pud_t pud)
8358 {
8359 return native_pgd_val(pud.pgd);
8360@@ -240,8 +251,6 @@ static inline pmdval_t native_pmd_val(pm
8361 return pmd.pmd;
8362 }
8363 #else
8364-#include <asm-generic/pgtable-nopmd.h>
8365-
8366 static inline pmdval_t native_pmd_val(pmd_t pmd)
8367 {
8368 return native_pgd_val(pmd.pud.pgd);
8369@@ -281,7 +290,6 @@ typedef struct page *pgtable_t;
8370
8371 extern pteval_t __supported_pte_mask;
8372 extern void set_nx(void);
8373-extern int nx_enabled;
8374
8375 #define pgprot_writecombine pgprot_writecombine
8376 extern pgprot_t pgprot_writecombine(pgprot_t prot);
8377diff -urNp linux-3.0.4/arch/x86/include/asm/processor.h linux-3.0.4/arch/x86/include/asm/processor.h
8378--- linux-3.0.4/arch/x86/include/asm/processor.h 2011-07-21 22:17:23.000000000 -0400
8379+++ linux-3.0.4/arch/x86/include/asm/processor.h 2011-08-23 21:47:55.000000000 -0400
8380@@ -266,7 +266,7 @@ struct tss_struct {
8381
8382 } ____cacheline_aligned;
8383
8384-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
8385+extern struct tss_struct init_tss[NR_CPUS];
8386
8387 /*
8388 * Save the original ist values for checking stack pointers during debugging
8389@@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(co
8390 */
8391 #define TASK_SIZE PAGE_OFFSET
8392 #define TASK_SIZE_MAX TASK_SIZE
8393+
8394+#ifdef CONFIG_PAX_SEGMEXEC
8395+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
8396+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
8397+#else
8398 #define STACK_TOP TASK_SIZE
8399-#define STACK_TOP_MAX STACK_TOP
8400+#endif
8401+
8402+#define STACK_TOP_MAX TASK_SIZE
8403
8404 #define INIT_THREAD { \
8405- .sp0 = sizeof(init_stack) + (long)&init_stack, \
8406+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8407 .vm86_info = NULL, \
8408 .sysenter_cs = __KERNEL_CS, \
8409 .io_bitmap_ptr = NULL, \
8410@@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(co
8411 */
8412 #define INIT_TSS { \
8413 .x86_tss = { \
8414- .sp0 = sizeof(init_stack) + (long)&init_stack, \
8415+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8416 .ss0 = __KERNEL_DS, \
8417 .ss1 = __KERNEL_CS, \
8418 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
8419@@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(co
8420 extern unsigned long thread_saved_pc(struct task_struct *tsk);
8421
8422 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
8423-#define KSTK_TOP(info) \
8424-({ \
8425- unsigned long *__ptr = (unsigned long *)(info); \
8426- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
8427-})
8428+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
8429
8430 /*
8431 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
8432@@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(str
8433 #define task_pt_regs(task) \
8434 ({ \
8435 struct pt_regs *__regs__; \
8436- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
8437+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
8438 __regs__ - 1; \
8439 })
8440
8441@@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(str
8442 /*
8443 * User space process size. 47bits minus one guard page.
8444 */
8445-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
8446+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
8447
8448 /* This decides where the kernel will search for a free chunk of vm
8449 * space during mmap's.
8450 */
8451 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
8452- 0xc0000000 : 0xFFFFe000)
8453+ 0xc0000000 : 0xFFFFf000)
8454
8455 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
8456 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
8457@@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(str
8458 #define STACK_TOP_MAX TASK_SIZE_MAX
8459
8460 #define INIT_THREAD { \
8461- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8462+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8463 }
8464
8465 #define INIT_TSS { \
8466- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8467+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8468 }
8469
8470 /*
8471@@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs
8472 */
8473 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
8474
8475+#ifdef CONFIG_PAX_SEGMEXEC
8476+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
8477+#endif
8478+
8479 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
8480
8481 /* Get/set a process' ability to use the timestamp counter instruction */
8482diff -urNp linux-3.0.4/arch/x86/include/asm/ptrace.h linux-3.0.4/arch/x86/include/asm/ptrace.h
8483--- linux-3.0.4/arch/x86/include/asm/ptrace.h 2011-07-21 22:17:23.000000000 -0400
8484+++ linux-3.0.4/arch/x86/include/asm/ptrace.h 2011-08-23 21:47:55.000000000 -0400
8485@@ -153,28 +153,29 @@ static inline unsigned long regs_return_
8486 }
8487
8488 /*
8489- * user_mode_vm(regs) determines whether a register set came from user mode.
8490+ * user_mode(regs) determines whether a register set came from user mode.
8491 * This is true if V8086 mode was enabled OR if the register set was from
8492 * protected mode with RPL-3 CS value. This tricky test checks that with
8493 * one comparison. Many places in the kernel can bypass this full check
8494- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
8495+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
8496+ * be used.
8497 */
8498-static inline int user_mode(struct pt_regs *regs)
8499+static inline int user_mode_novm(struct pt_regs *regs)
8500 {
8501 #ifdef CONFIG_X86_32
8502 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
8503 #else
8504- return !!(regs->cs & 3);
8505+ return !!(regs->cs & SEGMENT_RPL_MASK);
8506 #endif
8507 }
8508
8509-static inline int user_mode_vm(struct pt_regs *regs)
8510+static inline int user_mode(struct pt_regs *regs)
8511 {
8512 #ifdef CONFIG_X86_32
8513 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
8514 USER_RPL;
8515 #else
8516- return user_mode(regs);
8517+ return user_mode_novm(regs);
8518 #endif
8519 }
8520
8521diff -urNp linux-3.0.4/arch/x86/include/asm/reboot.h linux-3.0.4/arch/x86/include/asm/reboot.h
8522--- linux-3.0.4/arch/x86/include/asm/reboot.h 2011-07-21 22:17:23.000000000 -0400
8523+++ linux-3.0.4/arch/x86/include/asm/reboot.h 2011-08-23 21:47:55.000000000 -0400
8524@@ -6,19 +6,19 @@
8525 struct pt_regs;
8526
8527 struct machine_ops {
8528- void (*restart)(char *cmd);
8529- void (*halt)(void);
8530- void (*power_off)(void);
8531+ void (* __noreturn restart)(char *cmd);
8532+ void (* __noreturn halt)(void);
8533+ void (* __noreturn power_off)(void);
8534 void (*shutdown)(void);
8535 void (*crash_shutdown)(struct pt_regs *);
8536- void (*emergency_restart)(void);
8537-};
8538+ void (* __noreturn emergency_restart)(void);
8539+} __no_const;
8540
8541 extern struct machine_ops machine_ops;
8542
8543 void native_machine_crash_shutdown(struct pt_regs *regs);
8544 void native_machine_shutdown(void);
8545-void machine_real_restart(unsigned int type);
8546+void machine_real_restart(unsigned int type) __noreturn;
8547 /* These must match dispatch_table in reboot_32.S */
8548 #define MRR_BIOS 0
8549 #define MRR_APM 1
8550diff -urNp linux-3.0.4/arch/x86/include/asm/rwsem.h linux-3.0.4/arch/x86/include/asm/rwsem.h
8551--- linux-3.0.4/arch/x86/include/asm/rwsem.h 2011-07-21 22:17:23.000000000 -0400
8552+++ linux-3.0.4/arch/x86/include/asm/rwsem.h 2011-08-23 21:47:55.000000000 -0400
8553@@ -64,6 +64,14 @@ static inline void __down_read(struct rw
8554 {
8555 asm volatile("# beginning down_read\n\t"
8556 LOCK_PREFIX _ASM_INC "(%1)\n\t"
8557+
8558+#ifdef CONFIG_PAX_REFCOUNT
8559+ "jno 0f\n"
8560+ LOCK_PREFIX _ASM_DEC "(%1)\n"
8561+ "int $4\n0:\n"
8562+ _ASM_EXTABLE(0b, 0b)
8563+#endif
8564+
8565 /* adds 0x00000001 */
8566 " jns 1f\n"
8567 " call call_rwsem_down_read_failed\n"
8568@@ -85,6 +93,14 @@ static inline int __down_read_trylock(st
8569 "1:\n\t"
8570 " mov %1,%2\n\t"
8571 " add %3,%2\n\t"
8572+
8573+#ifdef CONFIG_PAX_REFCOUNT
8574+ "jno 0f\n"
8575+ "sub %3,%2\n"
8576+ "int $4\n0:\n"
8577+ _ASM_EXTABLE(0b, 0b)
8578+#endif
8579+
8580 " jle 2f\n\t"
8581 LOCK_PREFIX " cmpxchg %2,%0\n\t"
8582 " jnz 1b\n\t"
8583@@ -104,6 +120,14 @@ static inline void __down_write_nested(s
8584 long tmp;
8585 asm volatile("# beginning down_write\n\t"
8586 LOCK_PREFIX " xadd %1,(%2)\n\t"
8587+
8588+#ifdef CONFIG_PAX_REFCOUNT
8589+ "jno 0f\n"
8590+ "mov %1,(%2)\n"
8591+ "int $4\n0:\n"
8592+ _ASM_EXTABLE(0b, 0b)
8593+#endif
8594+
8595 /* adds 0xffff0001, returns the old value */
8596 " test %1,%1\n\t"
8597 /* was the count 0 before? */
8598@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_s
8599 long tmp;
8600 asm volatile("# beginning __up_read\n\t"
8601 LOCK_PREFIX " xadd %1,(%2)\n\t"
8602+
8603+#ifdef CONFIG_PAX_REFCOUNT
8604+ "jno 0f\n"
8605+ "mov %1,(%2)\n"
8606+ "int $4\n0:\n"
8607+ _ASM_EXTABLE(0b, 0b)
8608+#endif
8609+
8610 /* subtracts 1, returns the old value */
8611 " jns 1f\n\t"
8612 " call call_rwsem_wake\n" /* expects old value in %edx */
8613@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_
8614 long tmp;
8615 asm volatile("# beginning __up_write\n\t"
8616 LOCK_PREFIX " xadd %1,(%2)\n\t"
8617+
8618+#ifdef CONFIG_PAX_REFCOUNT
8619+ "jno 0f\n"
8620+ "mov %1,(%2)\n"
8621+ "int $4\n0:\n"
8622+ _ASM_EXTABLE(0b, 0b)
8623+#endif
8624+
8625 /* subtracts 0xffff0001, returns the old value */
8626 " jns 1f\n\t"
8627 " call call_rwsem_wake\n" /* expects old value in %edx */
8628@@ -176,6 +216,14 @@ static inline void __downgrade_write(str
8629 {
8630 asm volatile("# beginning __downgrade_write\n\t"
8631 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
8632+
8633+#ifdef CONFIG_PAX_REFCOUNT
8634+ "jno 0f\n"
8635+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
8636+ "int $4\n0:\n"
8637+ _ASM_EXTABLE(0b, 0b)
8638+#endif
8639+
8640 /*
8641 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
8642 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
8643@@ -194,7 +242,15 @@ static inline void __downgrade_write(str
8644 */
8645 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
8646 {
8647- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
8648+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
8649+
8650+#ifdef CONFIG_PAX_REFCOUNT
8651+ "jno 0f\n"
8652+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
8653+ "int $4\n0:\n"
8654+ _ASM_EXTABLE(0b, 0b)
8655+#endif
8656+
8657 : "+m" (sem->count)
8658 : "er" (delta));
8659 }
8660@@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(l
8661 {
8662 long tmp = delta;
8663
8664- asm volatile(LOCK_PREFIX "xadd %0,%1"
8665+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
8666+
8667+#ifdef CONFIG_PAX_REFCOUNT
8668+ "jno 0f\n"
8669+ "mov %0,%1\n"
8670+ "int $4\n0:\n"
8671+ _ASM_EXTABLE(0b, 0b)
8672+#endif
8673+
8674 : "+r" (tmp), "+m" (sem->count)
8675 : : "memory");
8676
8677diff -urNp linux-3.0.4/arch/x86/include/asm/segment.h linux-3.0.4/arch/x86/include/asm/segment.h
8678--- linux-3.0.4/arch/x86/include/asm/segment.h 2011-07-21 22:17:23.000000000 -0400
8679+++ linux-3.0.4/arch/x86/include/asm/segment.h 2011-08-23 21:47:55.000000000 -0400
8680@@ -64,8 +64,8 @@
8681 * 26 - ESPFIX small SS
8682 * 27 - per-cpu [ offset to per-cpu data area ]
8683 * 28 - stack_canary-20 [ for stack protector ]
8684- * 29 - unused
8685- * 30 - unused
8686+ * 29 - PCI BIOS CS
8687+ * 30 - PCI BIOS DS
8688 * 31 - TSS for double fault handler
8689 */
8690 #define GDT_ENTRY_TLS_MIN 6
8691@@ -79,6 +79,8 @@
8692
8693 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
8694
8695+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
8696+
8697 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
8698
8699 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
8700@@ -104,6 +106,12 @@
8701 #define __KERNEL_STACK_CANARY 0
8702 #endif
8703
8704+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
8705+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
8706+
8707+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
8708+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
8709+
8710 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
8711
8712 /*
8713@@ -141,7 +149,7 @@
8714 */
8715
8716 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
8717-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
8718+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
8719
8720
8721 #else
8722@@ -165,6 +173,8 @@
8723 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
8724 #define __USER32_DS __USER_DS
8725
8726+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
8727+
8728 #define GDT_ENTRY_TSS 8 /* needs two entries */
8729 #define GDT_ENTRY_LDT 10 /* needs two entries */
8730 #define GDT_ENTRY_TLS_MIN 12
8731@@ -185,6 +195,7 @@
8732 #endif
8733
8734 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
8735+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
8736 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
8737 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
8738 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
8739diff -urNp linux-3.0.4/arch/x86/include/asm/smp.h linux-3.0.4/arch/x86/include/asm/smp.h
8740--- linux-3.0.4/arch/x86/include/asm/smp.h 2011-07-21 22:17:23.000000000 -0400
8741+++ linux-3.0.4/arch/x86/include/asm/smp.h 2011-08-23 21:47:55.000000000 -0400
8742@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_
8743 /* cpus sharing the last level cache: */
8744 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
8745 DECLARE_PER_CPU(u16, cpu_llc_id);
8746-DECLARE_PER_CPU(int, cpu_number);
8747+DECLARE_PER_CPU(unsigned int, cpu_number);
8748
8749 static inline struct cpumask *cpu_sibling_mask(int cpu)
8750 {
8751@@ -77,7 +77,7 @@ struct smp_ops {
8752
8753 void (*send_call_func_ipi)(const struct cpumask *mask);
8754 void (*send_call_func_single_ipi)(int cpu);
8755-};
8756+} __no_const;
8757
8758 /* Globals due to paravirt */
8759 extern void set_cpu_sibling_map(int cpu);
8760@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitd
8761 extern int safe_smp_processor_id(void);
8762
8763 #elif defined(CONFIG_X86_64_SMP)
8764-#define raw_smp_processor_id() (percpu_read(cpu_number))
8765-
8766-#define stack_smp_processor_id() \
8767-({ \
8768- struct thread_info *ti; \
8769- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
8770- ti->cpu; \
8771-})
8772+#define raw_smp_processor_id() (percpu_read(cpu_number))
8773+#define stack_smp_processor_id() raw_smp_processor_id()
8774 #define safe_smp_processor_id() smp_processor_id()
8775
8776 #endif
8777diff -urNp linux-3.0.4/arch/x86/include/asm/spinlock.h linux-3.0.4/arch/x86/include/asm/spinlock.h
8778--- linux-3.0.4/arch/x86/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
8779+++ linux-3.0.4/arch/x86/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
8780@@ -249,6 +249,14 @@ static inline int arch_write_can_lock(ar
8781 static inline void arch_read_lock(arch_rwlock_t *rw)
8782 {
8783 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
8784+
8785+#ifdef CONFIG_PAX_REFCOUNT
8786+ "jno 0f\n"
8787+ LOCK_PREFIX " addl $1,(%0)\n"
8788+ "int $4\n0:\n"
8789+ _ASM_EXTABLE(0b, 0b)
8790+#endif
8791+
8792 "jns 1f\n"
8793 "call __read_lock_failed\n\t"
8794 "1:\n"
8795@@ -258,6 +266,14 @@ static inline void arch_read_lock(arch_r
8796 static inline void arch_write_lock(arch_rwlock_t *rw)
8797 {
8798 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
8799+
8800+#ifdef CONFIG_PAX_REFCOUNT
8801+ "jno 0f\n"
8802+ LOCK_PREFIX " addl %1,(%0)\n"
8803+ "int $4\n0:\n"
8804+ _ASM_EXTABLE(0b, 0b)
8805+#endif
8806+
8807 "jz 1f\n"
8808 "call __write_lock_failed\n\t"
8809 "1:\n"
8810@@ -286,12 +302,29 @@ static inline int arch_write_trylock(arc
8811
8812 static inline void arch_read_unlock(arch_rwlock_t *rw)
8813 {
8814- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
8815+ asm volatile(LOCK_PREFIX "incl %0\n"
8816+
8817+#ifdef CONFIG_PAX_REFCOUNT
8818+ "jno 0f\n"
8819+ LOCK_PREFIX "decl %0\n"
8820+ "int $4\n0:\n"
8821+ _ASM_EXTABLE(0b, 0b)
8822+#endif
8823+
8824+ :"+m" (rw->lock) : : "memory");
8825 }
8826
8827 static inline void arch_write_unlock(arch_rwlock_t *rw)
8828 {
8829- asm volatile(LOCK_PREFIX "addl %1, %0"
8830+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
8831+
8832+#ifdef CONFIG_PAX_REFCOUNT
8833+ "jno 0f\n"
8834+ LOCK_PREFIX "subl %1, %0\n"
8835+ "int $4\n0:\n"
8836+ _ASM_EXTABLE(0b, 0b)
8837+#endif
8838+
8839 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
8840 }
8841
8842diff -urNp linux-3.0.4/arch/x86/include/asm/stackprotector.h linux-3.0.4/arch/x86/include/asm/stackprotector.h
8843--- linux-3.0.4/arch/x86/include/asm/stackprotector.h 2011-07-21 22:17:23.000000000 -0400
8844+++ linux-3.0.4/arch/x86/include/asm/stackprotector.h 2011-08-23 21:47:55.000000000 -0400
8845@@ -48,7 +48,7 @@
8846 * head_32 for boot CPU and setup_per_cpu_areas() for others.
8847 */
8848 #define GDT_STACK_CANARY_INIT \
8849- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
8850+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
8851
8852 /*
8853 * Initialize the stackprotector canary value.
8854@@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
8855
8856 static inline void load_stack_canary_segment(void)
8857 {
8858-#ifdef CONFIG_X86_32
8859+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
8860 asm volatile ("mov %0, %%gs" : : "r" (0));
8861 #endif
8862 }
8863diff -urNp linux-3.0.4/arch/x86/include/asm/stacktrace.h linux-3.0.4/arch/x86/include/asm/stacktrace.h
8864--- linux-3.0.4/arch/x86/include/asm/stacktrace.h 2011-07-21 22:17:23.000000000 -0400
8865+++ linux-3.0.4/arch/x86/include/asm/stacktrace.h 2011-08-23 21:47:55.000000000 -0400
8866@@ -11,28 +11,20 @@
8867
8868 extern int kstack_depth_to_print;
8869
8870-struct thread_info;
8871+struct task_struct;
8872 struct stacktrace_ops;
8873
8874-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
8875- unsigned long *stack,
8876- unsigned long bp,
8877- const struct stacktrace_ops *ops,
8878- void *data,
8879- unsigned long *end,
8880- int *graph);
8881-
8882-extern unsigned long
8883-print_context_stack(struct thread_info *tinfo,
8884- unsigned long *stack, unsigned long bp,
8885- const struct stacktrace_ops *ops, void *data,
8886- unsigned long *end, int *graph);
8887-
8888-extern unsigned long
8889-print_context_stack_bp(struct thread_info *tinfo,
8890- unsigned long *stack, unsigned long bp,
8891- const struct stacktrace_ops *ops, void *data,
8892- unsigned long *end, int *graph);
8893+typedef unsigned long walk_stack_t(struct task_struct *task,
8894+ void *stack_start,
8895+ unsigned long *stack,
8896+ unsigned long bp,
8897+ const struct stacktrace_ops *ops,
8898+ void *data,
8899+ unsigned long *end,
8900+ int *graph);
8901+
8902+extern walk_stack_t print_context_stack;
8903+extern walk_stack_t print_context_stack_bp;
8904
8905 /* Generic stack tracer with callbacks */
8906
8907@@ -40,7 +32,7 @@ struct stacktrace_ops {
8908 void (*address)(void *data, unsigned long address, int reliable);
8909 /* On negative return stop dumping */
8910 int (*stack)(void *data, char *name);
8911- walk_stack_t walk_stack;
8912+ walk_stack_t *walk_stack;
8913 };
8914
8915 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
8916diff -urNp linux-3.0.4/arch/x86/include/asm/system.h linux-3.0.4/arch/x86/include/asm/system.h
8917--- linux-3.0.4/arch/x86/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
8918+++ linux-3.0.4/arch/x86/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
8919@@ -129,7 +129,7 @@ do { \
8920 "call __switch_to\n\t" \
8921 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
8922 __switch_canary \
8923- "movq %P[thread_info](%%rsi),%%r8\n\t" \
8924+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
8925 "movq %%rax,%%rdi\n\t" \
8926 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
8927 "jnz ret_from_fork\n\t" \
8928@@ -140,7 +140,7 @@ do { \
8929 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
8930 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
8931 [_tif_fork] "i" (_TIF_FORK), \
8932- [thread_info] "i" (offsetof(struct task_struct, stack)), \
8933+ [thread_info] "m" (current_tinfo), \
8934 [current_task] "m" (current_task) \
8935 __switch_canary_iparam \
8936 : "memory", "cc" __EXTRA_CLOBBER)
8937@@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
8938 {
8939 unsigned long __limit;
8940 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
8941- return __limit + 1;
8942+ return __limit;
8943 }
8944
8945 static inline void native_clts(void)
8946@@ -397,12 +397,12 @@ void enable_hlt(void);
8947
8948 void cpu_idle_wait(void);
8949
8950-extern unsigned long arch_align_stack(unsigned long sp);
8951+#define arch_align_stack(x) ((x) & ~0xfUL)
8952 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
8953
8954 void default_idle(void);
8955
8956-void stop_this_cpu(void *dummy);
8957+void stop_this_cpu(void *dummy) __noreturn;
8958
8959 /*
8960 * Force strict CPU ordering.
8961diff -urNp linux-3.0.4/arch/x86/include/asm/thread_info.h linux-3.0.4/arch/x86/include/asm/thread_info.h
8962--- linux-3.0.4/arch/x86/include/asm/thread_info.h 2011-07-21 22:17:23.000000000 -0400
8963+++ linux-3.0.4/arch/x86/include/asm/thread_info.h 2011-08-23 21:47:55.000000000 -0400
8964@@ -10,6 +10,7 @@
8965 #include <linux/compiler.h>
8966 #include <asm/page.h>
8967 #include <asm/types.h>
8968+#include <asm/percpu.h>
8969
8970 /*
8971 * low level task data that entry.S needs immediate access to
8972@@ -24,7 +25,6 @@ struct exec_domain;
8973 #include <asm/atomic.h>
8974
8975 struct thread_info {
8976- struct task_struct *task; /* main task structure */
8977 struct exec_domain *exec_domain; /* execution domain */
8978 __u32 flags; /* low level flags */
8979 __u32 status; /* thread synchronous flags */
8980@@ -34,18 +34,12 @@ struct thread_info {
8981 mm_segment_t addr_limit;
8982 struct restart_block restart_block;
8983 void __user *sysenter_return;
8984-#ifdef CONFIG_X86_32
8985- unsigned long previous_esp; /* ESP of the previous stack in
8986- case of nested (IRQ) stacks
8987- */
8988- __u8 supervisor_stack[0];
8989-#endif
8990+ unsigned long lowest_stack;
8991 int uaccess_err;
8992 };
8993
8994-#define INIT_THREAD_INFO(tsk) \
8995+#define INIT_THREAD_INFO \
8996 { \
8997- .task = &tsk, \
8998 .exec_domain = &default_exec_domain, \
8999 .flags = 0, \
9000 .cpu = 0, \
9001@@ -56,7 +50,7 @@ struct thread_info {
9002 }, \
9003 }
9004
9005-#define init_thread_info (init_thread_union.thread_info)
9006+#define init_thread_info (init_thread_union.stack)
9007 #define init_stack (init_thread_union.stack)
9008
9009 #else /* !__ASSEMBLY__ */
9010@@ -170,6 +164,23 @@ struct thread_info {
9011 ret; \
9012 })
9013
9014+#ifdef __ASSEMBLY__
9015+/* how to get the thread information struct from ASM */
9016+#define GET_THREAD_INFO(reg) \
9017+ mov PER_CPU_VAR(current_tinfo), reg
9018+
9019+/* use this one if reg already contains %esp */
9020+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
9021+#else
9022+/* how to get the thread information struct from C */
9023+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
9024+
9025+static __always_inline struct thread_info *current_thread_info(void)
9026+{
9027+ return percpu_read_stable(current_tinfo);
9028+}
9029+#endif
9030+
9031 #ifdef CONFIG_X86_32
9032
9033 #define STACK_WARN (THREAD_SIZE/8)
9034@@ -180,35 +191,13 @@ struct thread_info {
9035 */
9036 #ifndef __ASSEMBLY__
9037
9038-
9039 /* how to get the current stack pointer from C */
9040 register unsigned long current_stack_pointer asm("esp") __used;
9041
9042-/* how to get the thread information struct from C */
9043-static inline struct thread_info *current_thread_info(void)
9044-{
9045- return (struct thread_info *)
9046- (current_stack_pointer & ~(THREAD_SIZE - 1));
9047-}
9048-
9049-#else /* !__ASSEMBLY__ */
9050-
9051-/* how to get the thread information struct from ASM */
9052-#define GET_THREAD_INFO(reg) \
9053- movl $-THREAD_SIZE, reg; \
9054- andl %esp, reg
9055-
9056-/* use this one if reg already contains %esp */
9057-#define GET_THREAD_INFO_WITH_ESP(reg) \
9058- andl $-THREAD_SIZE, reg
9059-
9060 #endif
9061
9062 #else /* X86_32 */
9063
9064-#include <asm/percpu.h>
9065-#define KERNEL_STACK_OFFSET (5*8)
9066-
9067 /*
9068 * macros/functions for gaining access to the thread information structure
9069 * preempt_count needs to be 1 initially, until the scheduler is functional.
9070@@ -216,21 +205,8 @@ static inline struct thread_info *curren
9071 #ifndef __ASSEMBLY__
9072 DECLARE_PER_CPU(unsigned long, kernel_stack);
9073
9074-static inline struct thread_info *current_thread_info(void)
9075-{
9076- struct thread_info *ti;
9077- ti = (void *)(percpu_read_stable(kernel_stack) +
9078- KERNEL_STACK_OFFSET - THREAD_SIZE);
9079- return ti;
9080-}
9081-
9082-#else /* !__ASSEMBLY__ */
9083-
9084-/* how to get the thread information struct from ASM */
9085-#define GET_THREAD_INFO(reg) \
9086- movq PER_CPU_VAR(kernel_stack),reg ; \
9087- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
9088-
9089+/* how to get the current stack pointer from C */
9090+register unsigned long current_stack_pointer asm("rsp") __used;
9091 #endif
9092
9093 #endif /* !X86_32 */
9094@@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
9095 extern void free_thread_info(struct thread_info *ti);
9096 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
9097 #define arch_task_cache_init arch_task_cache_init
9098+
9099+#define __HAVE_THREAD_FUNCTIONS
9100+#define task_thread_info(task) (&(task)->tinfo)
9101+#define task_stack_page(task) ((task)->stack)
9102+#define setup_thread_stack(p, org) do {} while (0)
9103+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
9104+
9105+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
9106+extern struct task_struct *alloc_task_struct_node(int node);
9107+extern void free_task_struct(struct task_struct *);
9108+
9109 #endif
9110 #endif /* _ASM_X86_THREAD_INFO_H */
9111diff -urNp linux-3.0.4/arch/x86/include/asm/uaccess_32.h linux-3.0.4/arch/x86/include/asm/uaccess_32.h
9112--- linux-3.0.4/arch/x86/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
9113+++ linux-3.0.4/arch/x86/include/asm/uaccess_32.h 2011-08-23 21:48:14.000000000 -0400
9114@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_u
9115 static __always_inline unsigned long __must_check
9116 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
9117 {
9118+ pax_track_stack();
9119+
9120+ if ((long)n < 0)
9121+ return n;
9122+
9123 if (__builtin_constant_p(n)) {
9124 unsigned long ret;
9125
9126@@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to,
9127 return ret;
9128 }
9129 }
9130+ if (!__builtin_constant_p(n))
9131+ check_object_size(from, n, true);
9132 return __copy_to_user_ll(to, from, n);
9133 }
9134
9135@@ -82,12 +89,16 @@ static __always_inline unsigned long __m
9136 __copy_to_user(void __user *to, const void *from, unsigned long n)
9137 {
9138 might_fault();
9139+
9140 return __copy_to_user_inatomic(to, from, n);
9141 }
9142
9143 static __always_inline unsigned long
9144 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
9145 {
9146+ if ((long)n < 0)
9147+ return n;
9148+
9149 /* Avoid zeroing the tail if the copy fails..
9150 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
9151 * but as the zeroing behaviour is only significant when n is not
9152@@ -137,6 +148,12 @@ static __always_inline unsigned long
9153 __copy_from_user(void *to, const void __user *from, unsigned long n)
9154 {
9155 might_fault();
9156+
9157+ pax_track_stack();
9158+
9159+ if ((long)n < 0)
9160+ return n;
9161+
9162 if (__builtin_constant_p(n)) {
9163 unsigned long ret;
9164
9165@@ -152,6 +169,8 @@ __copy_from_user(void *to, const void __
9166 return ret;
9167 }
9168 }
9169+ if (!__builtin_constant_p(n))
9170+ check_object_size(to, n, false);
9171 return __copy_from_user_ll(to, from, n);
9172 }
9173
9174@@ -159,6 +178,10 @@ static __always_inline unsigned long __c
9175 const void __user *from, unsigned long n)
9176 {
9177 might_fault();
9178+
9179+ if ((long)n < 0)
9180+ return n;
9181+
9182 if (__builtin_constant_p(n)) {
9183 unsigned long ret;
9184
9185@@ -181,15 +204,19 @@ static __always_inline unsigned long
9186 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
9187 unsigned long n)
9188 {
9189- return __copy_from_user_ll_nocache_nozero(to, from, n);
9190-}
9191+ if ((long)n < 0)
9192+ return n;
9193
9194-unsigned long __must_check copy_to_user(void __user *to,
9195- const void *from, unsigned long n);
9196-unsigned long __must_check _copy_from_user(void *to,
9197- const void __user *from,
9198- unsigned long n);
9199+ return __copy_from_user_ll_nocache_nozero(to, from, n);
9200+}
9201
9202+extern void copy_to_user_overflow(void)
9203+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9204+ __compiletime_error("copy_to_user() buffer size is not provably correct")
9205+#else
9206+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
9207+#endif
9208+;
9209
9210 extern void copy_from_user_overflow(void)
9211 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9212@@ -199,17 +226,61 @@ extern void copy_from_user_overflow(void
9213 #endif
9214 ;
9215
9216-static inline unsigned long __must_check copy_from_user(void *to,
9217- const void __user *from,
9218- unsigned long n)
9219+/**
9220+ * copy_to_user: - Copy a block of data into user space.
9221+ * @to: Destination address, in user space.
9222+ * @from: Source address, in kernel space.
9223+ * @n: Number of bytes to copy.
9224+ *
9225+ * Context: User context only. This function may sleep.
9226+ *
9227+ * Copy data from kernel space to user space.
9228+ *
9229+ * Returns number of bytes that could not be copied.
9230+ * On success, this will be zero.
9231+ */
9232+static inline unsigned long __must_check
9233+copy_to_user(void __user *to, const void *from, unsigned long n)
9234+{
9235+ int sz = __compiletime_object_size(from);
9236+
9237+ if (unlikely(sz != -1 && sz < n))
9238+ copy_to_user_overflow();
9239+ else if (access_ok(VERIFY_WRITE, to, n))
9240+ n = __copy_to_user(to, from, n);
9241+ return n;
9242+}
9243+
9244+/**
9245+ * copy_from_user: - Copy a block of data from user space.
9246+ * @to: Destination address, in kernel space.
9247+ * @from: Source address, in user space.
9248+ * @n: Number of bytes to copy.
9249+ *
9250+ * Context: User context only. This function may sleep.
9251+ *
9252+ * Copy data from user space to kernel space.
9253+ *
9254+ * Returns number of bytes that could not be copied.
9255+ * On success, this will be zero.
9256+ *
9257+ * If some data could not be copied, this function will pad the copied
9258+ * data to the requested size using zero bytes.
9259+ */
9260+static inline unsigned long __must_check
9261+copy_from_user(void *to, const void __user *from, unsigned long n)
9262 {
9263 int sz = __compiletime_object_size(to);
9264
9265- if (likely(sz == -1 || sz >= n))
9266- n = _copy_from_user(to, from, n);
9267- else
9268+ if (unlikely(sz != -1 && sz < n))
9269 copy_from_user_overflow();
9270-
9271+ else if (access_ok(VERIFY_READ, from, n))
9272+ n = __copy_from_user(to, from, n);
9273+ else if ((long)n > 0) {
9274+ if (!__builtin_constant_p(n))
9275+ check_object_size(to, n, false);
9276+ memset(to, 0, n);
9277+ }
9278 return n;
9279 }
9280
9281diff -urNp linux-3.0.4/arch/x86/include/asm/uaccess_64.h linux-3.0.4/arch/x86/include/asm/uaccess_64.h
9282--- linux-3.0.4/arch/x86/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
9283+++ linux-3.0.4/arch/x86/include/asm/uaccess_64.h 2011-08-23 21:48:14.000000000 -0400
9284@@ -10,6 +10,9 @@
9285 #include <asm/alternative.h>
9286 #include <asm/cpufeature.h>
9287 #include <asm/page.h>
9288+#include <asm/pgtable.h>
9289+
9290+#define set_fs(x) (current_thread_info()->addr_limit = (x))
9291
9292 /*
9293 * Copy To/From Userspace
9294@@ -36,26 +39,26 @@ copy_user_generic(void *to, const void *
9295 return ret;
9296 }
9297
9298-__must_check unsigned long
9299-_copy_to_user(void __user *to, const void *from, unsigned len);
9300-__must_check unsigned long
9301-_copy_from_user(void *to, const void __user *from, unsigned len);
9302+static __always_inline __must_check unsigned long
9303+__copy_to_user(void __user *to, const void *from, unsigned len);
9304+static __always_inline __must_check unsigned long
9305+__copy_from_user(void *to, const void __user *from, unsigned len);
9306 __must_check unsigned long
9307 copy_in_user(void __user *to, const void __user *from, unsigned len);
9308
9309 static inline unsigned long __must_check copy_from_user(void *to,
9310 const void __user *from,
9311- unsigned long n)
9312+ unsigned n)
9313 {
9314- int sz = __compiletime_object_size(to);
9315-
9316 might_fault();
9317- if (likely(sz == -1 || sz >= n))
9318- n = _copy_from_user(to, from, n);
9319-#ifdef CONFIG_DEBUG_VM
9320- else
9321- WARN(1, "Buffer overflow detected!\n");
9322-#endif
9323+
9324+ if (access_ok(VERIFY_READ, from, n))
9325+ n = __copy_from_user(to, from, n);
9326+ else if ((int)n > 0) {
9327+ if (!__builtin_constant_p(n))
9328+ check_object_size(to, n, false);
9329+ memset(to, 0, n);
9330+ }
9331 return n;
9332 }
9333
9334@@ -64,110 +67,198 @@ int copy_to_user(void __user *dst, const
9335 {
9336 might_fault();
9337
9338- return _copy_to_user(dst, src, size);
9339+ if (access_ok(VERIFY_WRITE, dst, size))
9340+ size = __copy_to_user(dst, src, size);
9341+ return size;
9342 }
9343
9344 static __always_inline __must_check
9345-int __copy_from_user(void *dst, const void __user *src, unsigned size)
9346+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
9347 {
9348- int ret = 0;
9349+ int sz = __compiletime_object_size(dst);
9350+ unsigned ret = 0;
9351
9352 might_fault();
9353- if (!__builtin_constant_p(size))
9354- return copy_user_generic(dst, (__force void *)src, size);
9355+
9356+ pax_track_stack();
9357+
9358+ if ((int)size < 0)
9359+ return size;
9360+
9361+#ifdef CONFIG_PAX_MEMORY_UDEREF
9362+ if (!__access_ok(VERIFY_READ, src, size))
9363+ return size;
9364+#endif
9365+
9366+ if (unlikely(sz != -1 && sz < size)) {
9367+#ifdef CONFIG_DEBUG_VM
9368+ WARN(1, "Buffer overflow detected!\n");
9369+#endif
9370+ return size;
9371+ }
9372+
9373+ if (!__builtin_constant_p(size)) {
9374+ check_object_size(dst, size, false);
9375+
9376+#ifdef CONFIG_PAX_MEMORY_UDEREF
9377+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9378+ src += PAX_USER_SHADOW_BASE;
9379+#endif
9380+
9381+ return copy_user_generic(dst, (__force const void *)src, size);
9382+ }
9383 switch (size) {
9384- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
9385+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
9386 ret, "b", "b", "=q", 1);
9387 return ret;
9388- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
9389+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
9390 ret, "w", "w", "=r", 2);
9391 return ret;
9392- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
9393+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
9394 ret, "l", "k", "=r", 4);
9395 return ret;
9396- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
9397+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9398 ret, "q", "", "=r", 8);
9399 return ret;
9400 case 10:
9401- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9402+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9403 ret, "q", "", "=r", 10);
9404 if (unlikely(ret))
9405 return ret;
9406 __get_user_asm(*(u16 *)(8 + (char *)dst),
9407- (u16 __user *)(8 + (char __user *)src),
9408+ (const u16 __user *)(8 + (const char __user *)src),
9409 ret, "w", "w", "=r", 2);
9410 return ret;
9411 case 16:
9412- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9413+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9414 ret, "q", "", "=r", 16);
9415 if (unlikely(ret))
9416 return ret;
9417 __get_user_asm(*(u64 *)(8 + (char *)dst),
9418- (u64 __user *)(8 + (char __user *)src),
9419+ (const u64 __user *)(8 + (const char __user *)src),
9420 ret, "q", "", "=r", 8);
9421 return ret;
9422 default:
9423- return copy_user_generic(dst, (__force void *)src, size);
9424+
9425+#ifdef CONFIG_PAX_MEMORY_UDEREF
9426+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9427+ src += PAX_USER_SHADOW_BASE;
9428+#endif
9429+
9430+ return copy_user_generic(dst, (__force const void *)src, size);
9431 }
9432 }
9433
9434 static __always_inline __must_check
9435-int __copy_to_user(void __user *dst, const void *src, unsigned size)
9436+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
9437 {
9438- int ret = 0;
9439+ int sz = __compiletime_object_size(src);
9440+ unsigned ret = 0;
9441
9442 might_fault();
9443- if (!__builtin_constant_p(size))
9444+
9445+ pax_track_stack();
9446+
9447+ if ((int)size < 0)
9448+ return size;
9449+
9450+#ifdef CONFIG_PAX_MEMORY_UDEREF
9451+ if (!__access_ok(VERIFY_WRITE, dst, size))
9452+ return size;
9453+#endif
9454+
9455+ if (unlikely(sz != -1 && sz < size)) {
9456+#ifdef CONFIG_DEBUG_VM
9457+ WARN(1, "Buffer overflow detected!\n");
9458+#endif
9459+ return size;
9460+ }
9461+
9462+ if (!__builtin_constant_p(size)) {
9463+ check_object_size(src, size, true);
9464+
9465+#ifdef CONFIG_PAX_MEMORY_UDEREF
9466+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9467+ dst += PAX_USER_SHADOW_BASE;
9468+#endif
9469+
9470 return copy_user_generic((__force void *)dst, src, size);
9471+ }
9472 switch (size) {
9473- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
9474+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
9475 ret, "b", "b", "iq", 1);
9476 return ret;
9477- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
9478+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
9479 ret, "w", "w", "ir", 2);
9480 return ret;
9481- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
9482+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
9483 ret, "l", "k", "ir", 4);
9484 return ret;
9485- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
9486+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9487 ret, "q", "", "er", 8);
9488 return ret;
9489 case 10:
9490- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9491+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9492 ret, "q", "", "er", 10);
9493 if (unlikely(ret))
9494 return ret;
9495 asm("":::"memory");
9496- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
9497+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
9498 ret, "w", "w", "ir", 2);
9499 return ret;
9500 case 16:
9501- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9502+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9503 ret, "q", "", "er", 16);
9504 if (unlikely(ret))
9505 return ret;
9506 asm("":::"memory");
9507- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
9508+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
9509 ret, "q", "", "er", 8);
9510 return ret;
9511 default:
9512+
9513+#ifdef CONFIG_PAX_MEMORY_UDEREF
9514+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9515+ dst += PAX_USER_SHADOW_BASE;
9516+#endif
9517+
9518 return copy_user_generic((__force void *)dst, src, size);
9519 }
9520 }
9521
9522 static __always_inline __must_check
9523-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9524+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9525 {
9526- int ret = 0;
9527+ unsigned ret = 0;
9528
9529 might_fault();
9530- if (!__builtin_constant_p(size))
9531+
9532+ if ((int)size < 0)
9533+ return size;
9534+
9535+#ifdef CONFIG_PAX_MEMORY_UDEREF
9536+ if (!__access_ok(VERIFY_READ, src, size))
9537+ return size;
9538+ if (!__access_ok(VERIFY_WRITE, dst, size))
9539+ return size;
9540+#endif
9541+
9542+ if (!__builtin_constant_p(size)) {
9543+
9544+#ifdef CONFIG_PAX_MEMORY_UDEREF
9545+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9546+ src += PAX_USER_SHADOW_BASE;
9547+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9548+ dst += PAX_USER_SHADOW_BASE;
9549+#endif
9550+
9551 return copy_user_generic((__force void *)dst,
9552- (__force void *)src, size);
9553+ (__force const void *)src, size);
9554+ }
9555 switch (size) {
9556 case 1: {
9557 u8 tmp;
9558- __get_user_asm(tmp, (u8 __user *)src,
9559+ __get_user_asm(tmp, (const u8 __user *)src,
9560 ret, "b", "b", "=q", 1);
9561 if (likely(!ret))
9562 __put_user_asm(tmp, (u8 __user *)dst,
9563@@ -176,7 +267,7 @@ int __copy_in_user(void __user *dst, con
9564 }
9565 case 2: {
9566 u16 tmp;
9567- __get_user_asm(tmp, (u16 __user *)src,
9568+ __get_user_asm(tmp, (const u16 __user *)src,
9569 ret, "w", "w", "=r", 2);
9570 if (likely(!ret))
9571 __put_user_asm(tmp, (u16 __user *)dst,
9572@@ -186,7 +277,7 @@ int __copy_in_user(void __user *dst, con
9573
9574 case 4: {
9575 u32 tmp;
9576- __get_user_asm(tmp, (u32 __user *)src,
9577+ __get_user_asm(tmp, (const u32 __user *)src,
9578 ret, "l", "k", "=r", 4);
9579 if (likely(!ret))
9580 __put_user_asm(tmp, (u32 __user *)dst,
9581@@ -195,7 +286,7 @@ int __copy_in_user(void __user *dst, con
9582 }
9583 case 8: {
9584 u64 tmp;
9585- __get_user_asm(tmp, (u64 __user *)src,
9586+ __get_user_asm(tmp, (const u64 __user *)src,
9587 ret, "q", "", "=r", 8);
9588 if (likely(!ret))
9589 __put_user_asm(tmp, (u64 __user *)dst,
9590@@ -203,8 +294,16 @@ int __copy_in_user(void __user *dst, con
9591 return ret;
9592 }
9593 default:
9594+
9595+#ifdef CONFIG_PAX_MEMORY_UDEREF
9596+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9597+ src += PAX_USER_SHADOW_BASE;
9598+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9599+ dst += PAX_USER_SHADOW_BASE;
9600+#endif
9601+
9602 return copy_user_generic((__force void *)dst,
9603- (__force void *)src, size);
9604+ (__force const void *)src, size);
9605 }
9606 }
9607
9608@@ -221,33 +320,72 @@ __must_check unsigned long __clear_user(
9609 static __must_check __always_inline int
9610 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
9611 {
9612+ pax_track_stack();
9613+
9614+ if ((int)size < 0)
9615+ return size;
9616+
9617+#ifdef CONFIG_PAX_MEMORY_UDEREF
9618+ if (!__access_ok(VERIFY_READ, src, size))
9619+ return size;
9620+
9621+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9622+ src += PAX_USER_SHADOW_BASE;
9623+#endif
9624+
9625 return copy_user_generic(dst, (__force const void *)src, size);
9626 }
9627
9628-static __must_check __always_inline int
9629+static __must_check __always_inline unsigned long
9630 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
9631 {
9632+ if ((int)size < 0)
9633+ return size;
9634+
9635+#ifdef CONFIG_PAX_MEMORY_UDEREF
9636+ if (!__access_ok(VERIFY_WRITE, dst, size))
9637+ return size;
9638+
9639+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9640+ dst += PAX_USER_SHADOW_BASE;
9641+#endif
9642+
9643 return copy_user_generic((__force void *)dst, src, size);
9644 }
9645
9646-extern long __copy_user_nocache(void *dst, const void __user *src,
9647+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
9648 unsigned size, int zerorest);
9649
9650-static inline int
9651-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9652+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9653 {
9654 might_sleep();
9655+
9656+ if ((int)size < 0)
9657+ return size;
9658+
9659+#ifdef CONFIG_PAX_MEMORY_UDEREF
9660+ if (!__access_ok(VERIFY_READ, src, size))
9661+ return size;
9662+#endif
9663+
9664 return __copy_user_nocache(dst, src, size, 1);
9665 }
9666
9667-static inline int
9668-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9669+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9670 unsigned size)
9671 {
9672+ if ((int)size < 0)
9673+ return size;
9674+
9675+#ifdef CONFIG_PAX_MEMORY_UDEREF
9676+ if (!__access_ok(VERIFY_READ, src, size))
9677+ return size;
9678+#endif
9679+
9680 return __copy_user_nocache(dst, src, size, 0);
9681 }
9682
9683-unsigned long
9684+extern unsigned long
9685 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
9686
9687 #endif /* _ASM_X86_UACCESS_64_H */
9688diff -urNp linux-3.0.4/arch/x86/include/asm/uaccess.h linux-3.0.4/arch/x86/include/asm/uaccess.h
9689--- linux-3.0.4/arch/x86/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
9690+++ linux-3.0.4/arch/x86/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
9691@@ -7,12 +7,15 @@
9692 #include <linux/compiler.h>
9693 #include <linux/thread_info.h>
9694 #include <linux/string.h>
9695+#include <linux/sched.h>
9696 #include <asm/asm.h>
9697 #include <asm/page.h>
9698
9699 #define VERIFY_READ 0
9700 #define VERIFY_WRITE 1
9701
9702+extern void check_object_size(const void *ptr, unsigned long n, bool to);
9703+
9704 /*
9705 * The fs value determines whether argument validity checking should be
9706 * performed or not. If get_fs() == USER_DS, checking is performed, with
9707@@ -28,7 +31,12 @@
9708
9709 #define get_ds() (KERNEL_DS)
9710 #define get_fs() (current_thread_info()->addr_limit)
9711+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9712+void __set_fs(mm_segment_t x);
9713+void set_fs(mm_segment_t x);
9714+#else
9715 #define set_fs(x) (current_thread_info()->addr_limit = (x))
9716+#endif
9717
9718 #define segment_eq(a, b) ((a).seg == (b).seg)
9719
9720@@ -76,7 +84,33 @@
9721 * checks that the pointer is in the user space range - after calling
9722 * this function, memory access functions may still return -EFAULT.
9723 */
9724-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9725+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9726+#define access_ok(type, addr, size) \
9727+({ \
9728+ long __size = size; \
9729+ unsigned long __addr = (unsigned long)addr; \
9730+ unsigned long __addr_ao = __addr & PAGE_MASK; \
9731+ unsigned long __end_ao = __addr + __size - 1; \
9732+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
9733+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
9734+ while(__addr_ao <= __end_ao) { \
9735+ char __c_ao; \
9736+ __addr_ao += PAGE_SIZE; \
9737+ if (__size > PAGE_SIZE) \
9738+ cond_resched(); \
9739+ if (__get_user(__c_ao, (char __user *)__addr)) \
9740+ break; \
9741+ if (type != VERIFY_WRITE) { \
9742+ __addr = __addr_ao; \
9743+ continue; \
9744+ } \
9745+ if (__put_user(__c_ao, (char __user *)__addr)) \
9746+ break; \
9747+ __addr = __addr_ao; \
9748+ } \
9749+ } \
9750+ __ret_ao; \
9751+})
9752
9753 /*
9754 * The exception table consists of pairs of addresses: the first is the
9755@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
9756 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
9757 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
9758
9759-
9760+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9761+#define __copyuser_seg "gs;"
9762+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
9763+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
9764+#else
9765+#define __copyuser_seg
9766+#define __COPYUSER_SET_ES
9767+#define __COPYUSER_RESTORE_ES
9768+#endif
9769
9770 #ifdef CONFIG_X86_32
9771 #define __put_user_asm_u64(x, addr, err, errret) \
9772- asm volatile("1: movl %%eax,0(%2)\n" \
9773- "2: movl %%edx,4(%2)\n" \
9774+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
9775+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
9776 "3:\n" \
9777 ".section .fixup,\"ax\"\n" \
9778 "4: movl %3,%0\n" \
9779@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
9780 : "A" (x), "r" (addr), "i" (errret), "0" (err))
9781
9782 #define __put_user_asm_ex_u64(x, addr) \
9783- asm volatile("1: movl %%eax,0(%1)\n" \
9784- "2: movl %%edx,4(%1)\n" \
9785+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
9786+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
9787 "3:\n" \
9788 _ASM_EXTABLE(1b, 2b - 1b) \
9789 _ASM_EXTABLE(2b, 3b - 2b) \
9790@@ -373,7 +415,7 @@ do { \
9791 } while (0)
9792
9793 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9794- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
9795+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
9796 "2:\n" \
9797 ".section .fixup,\"ax\"\n" \
9798 "3: mov %3,%0\n" \
9799@@ -381,7 +423,7 @@ do { \
9800 " jmp 2b\n" \
9801 ".previous\n" \
9802 _ASM_EXTABLE(1b, 3b) \
9803- : "=r" (err), ltype(x) \
9804+ : "=r" (err), ltype (x) \
9805 : "m" (__m(addr)), "i" (errret), "0" (err))
9806
9807 #define __get_user_size_ex(x, ptr, size) \
9808@@ -406,7 +448,7 @@ do { \
9809 } while (0)
9810
9811 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
9812- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
9813+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
9814 "2:\n" \
9815 _ASM_EXTABLE(1b, 2b - 1b) \
9816 : ltype(x) : "m" (__m(addr)))
9817@@ -423,13 +465,24 @@ do { \
9818 int __gu_err; \
9819 unsigned long __gu_val; \
9820 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
9821- (x) = (__force __typeof__(*(ptr)))__gu_val; \
9822+ (x) = (__typeof__(*(ptr)))__gu_val; \
9823 __gu_err; \
9824 })
9825
9826 /* FIXME: this hack is definitely wrong -AK */
9827 struct __large_struct { unsigned long buf[100]; };
9828-#define __m(x) (*(struct __large_struct __user *)(x))
9829+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9830+#define ____m(x) \
9831+({ \
9832+ unsigned long ____x = (unsigned long)(x); \
9833+ if (____x < PAX_USER_SHADOW_BASE) \
9834+ ____x += PAX_USER_SHADOW_BASE; \
9835+ (void __user *)____x; \
9836+})
9837+#else
9838+#define ____m(x) (x)
9839+#endif
9840+#define __m(x) (*(struct __large_struct __user *)____m(x))
9841
9842 /*
9843 * Tell gcc we read from memory instead of writing: this is because
9844@@ -437,7 +490,7 @@ struct __large_struct { unsigned long bu
9845 * aliasing issues.
9846 */
9847 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9848- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
9849+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
9850 "2:\n" \
9851 ".section .fixup,\"ax\"\n" \
9852 "3: mov %3,%0\n" \
9853@@ -445,10 +498,10 @@ struct __large_struct { unsigned long bu
9854 ".previous\n" \
9855 _ASM_EXTABLE(1b, 3b) \
9856 : "=r"(err) \
9857- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
9858+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
9859
9860 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
9861- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
9862+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
9863 "2:\n" \
9864 _ASM_EXTABLE(1b, 2b - 1b) \
9865 : : ltype(x), "m" (__m(addr)))
9866@@ -487,8 +540,12 @@ struct __large_struct { unsigned long bu
9867 * On error, the variable @x is set to zero.
9868 */
9869
9870+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9871+#define __get_user(x, ptr) get_user((x), (ptr))
9872+#else
9873 #define __get_user(x, ptr) \
9874 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
9875+#endif
9876
9877 /**
9878 * __put_user: - Write a simple value into user space, with less checking.
9879@@ -510,8 +567,12 @@ struct __large_struct { unsigned long bu
9880 * Returns zero on success, or -EFAULT on error.
9881 */
9882
9883+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9884+#define __put_user(x, ptr) put_user((x), (ptr))
9885+#else
9886 #define __put_user(x, ptr) \
9887 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
9888+#endif
9889
9890 #define __get_user_unaligned __get_user
9891 #define __put_user_unaligned __put_user
9892@@ -529,7 +590,7 @@ struct __large_struct { unsigned long bu
9893 #define get_user_ex(x, ptr) do { \
9894 unsigned long __gue_val; \
9895 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
9896- (x) = (__force __typeof__(*(ptr)))__gue_val; \
9897+ (x) = (__typeof__(*(ptr)))__gue_val; \
9898 } while (0)
9899
9900 #ifdef CONFIG_X86_WP_WORKS_OK
9901diff -urNp linux-3.0.4/arch/x86/include/asm/vgtod.h linux-3.0.4/arch/x86/include/asm/vgtod.h
9902--- linux-3.0.4/arch/x86/include/asm/vgtod.h 2011-07-21 22:17:23.000000000 -0400
9903+++ linux-3.0.4/arch/x86/include/asm/vgtod.h 2011-08-23 21:47:55.000000000 -0400
9904@@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
9905 int sysctl_enabled;
9906 struct timezone sys_tz;
9907 struct { /* extract of a clocksource struct */
9908+ char name[8];
9909 cycle_t (*vread)(void);
9910 cycle_t cycle_last;
9911 cycle_t mask;
9912diff -urNp linux-3.0.4/arch/x86/include/asm/x86_init.h linux-3.0.4/arch/x86/include/asm/x86_init.h
9913--- linux-3.0.4/arch/x86/include/asm/x86_init.h 2011-07-21 22:17:23.000000000 -0400
9914+++ linux-3.0.4/arch/x86/include/asm/x86_init.h 2011-08-23 21:47:55.000000000 -0400
9915@@ -28,7 +28,7 @@ struct x86_init_mpparse {
9916 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
9917 void (*find_smp_config)(void);
9918 void (*get_smp_config)(unsigned int early);
9919-};
9920+} __no_const;
9921
9922 /**
9923 * struct x86_init_resources - platform specific resource related ops
9924@@ -42,7 +42,7 @@ struct x86_init_resources {
9925 void (*probe_roms)(void);
9926 void (*reserve_resources)(void);
9927 char *(*memory_setup)(void);
9928-};
9929+} __no_const;
9930
9931 /**
9932 * struct x86_init_irqs - platform specific interrupt setup
9933@@ -55,7 +55,7 @@ struct x86_init_irqs {
9934 void (*pre_vector_init)(void);
9935 void (*intr_init)(void);
9936 void (*trap_init)(void);
9937-};
9938+} __no_const;
9939
9940 /**
9941 * struct x86_init_oem - oem platform specific customizing functions
9942@@ -65,7 +65,7 @@ struct x86_init_irqs {
9943 struct x86_init_oem {
9944 void (*arch_setup)(void);
9945 void (*banner)(void);
9946-};
9947+} __no_const;
9948
9949 /**
9950 * struct x86_init_mapping - platform specific initial kernel pagetable setup
9951@@ -76,7 +76,7 @@ struct x86_init_oem {
9952 */
9953 struct x86_init_mapping {
9954 void (*pagetable_reserve)(u64 start, u64 end);
9955-};
9956+} __no_const;
9957
9958 /**
9959 * struct x86_init_paging - platform specific paging functions
9960@@ -86,7 +86,7 @@ struct x86_init_mapping {
9961 struct x86_init_paging {
9962 void (*pagetable_setup_start)(pgd_t *base);
9963 void (*pagetable_setup_done)(pgd_t *base);
9964-};
9965+} __no_const;
9966
9967 /**
9968 * struct x86_init_timers - platform specific timer setup
9969@@ -101,7 +101,7 @@ struct x86_init_timers {
9970 void (*tsc_pre_init)(void);
9971 void (*timer_init)(void);
9972 void (*wallclock_init)(void);
9973-};
9974+} __no_const;
9975
9976 /**
9977 * struct x86_init_iommu - platform specific iommu setup
9978@@ -109,7 +109,7 @@ struct x86_init_timers {
9979 */
9980 struct x86_init_iommu {
9981 int (*iommu_init)(void);
9982-};
9983+} __no_const;
9984
9985 /**
9986 * struct x86_init_pci - platform specific pci init functions
9987@@ -123,7 +123,7 @@ struct x86_init_pci {
9988 int (*init)(void);
9989 void (*init_irq)(void);
9990 void (*fixup_irqs)(void);
9991-};
9992+} __no_const;
9993
9994 /**
9995 * struct x86_init_ops - functions for platform specific setup
9996@@ -139,7 +139,7 @@ struct x86_init_ops {
9997 struct x86_init_timers timers;
9998 struct x86_init_iommu iommu;
9999 struct x86_init_pci pci;
10000-};
10001+} __no_const;
10002
10003 /**
10004 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
10005@@ -147,7 +147,7 @@ struct x86_init_ops {
10006 */
10007 struct x86_cpuinit_ops {
10008 void (*setup_percpu_clockev)(void);
10009-};
10010+} __no_const;
10011
10012 /**
10013 * struct x86_platform_ops - platform specific runtime functions
10014@@ -166,7 +166,7 @@ struct x86_platform_ops {
10015 bool (*is_untracked_pat_range)(u64 start, u64 end);
10016 void (*nmi_init)(void);
10017 int (*i8042_detect)(void);
10018-};
10019+} __no_const;
10020
10021 struct pci_dev;
10022
10023@@ -174,7 +174,7 @@ struct x86_msi_ops {
10024 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
10025 void (*teardown_msi_irq)(unsigned int irq);
10026 void (*teardown_msi_irqs)(struct pci_dev *dev);
10027-};
10028+} __no_const;
10029
10030 extern struct x86_init_ops x86_init;
10031 extern struct x86_cpuinit_ops x86_cpuinit;
10032diff -urNp linux-3.0.4/arch/x86/include/asm/xsave.h linux-3.0.4/arch/x86/include/asm/xsave.h
10033--- linux-3.0.4/arch/x86/include/asm/xsave.h 2011-07-21 22:17:23.000000000 -0400
10034+++ linux-3.0.4/arch/x86/include/asm/xsave.h 2011-08-23 21:47:55.000000000 -0400
10035@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav
10036 {
10037 int err;
10038
10039+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10040+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10041+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10042+#endif
10043+
10044 /*
10045 * Clear the xsave header first, so that reserved fields are
10046 * initialized to zero.
10047@@ -100,6 +105,11 @@ static inline int xrestore_user(struct x
10048 u32 lmask = mask;
10049 u32 hmask = mask >> 32;
10050
10051+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10052+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10053+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10054+#endif
10055+
10056 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10057 "2:\n"
10058 ".section .fixup,\"ax\"\n"
10059diff -urNp linux-3.0.4/arch/x86/Kconfig linux-3.0.4/arch/x86/Kconfig
10060--- linux-3.0.4/arch/x86/Kconfig 2011-07-21 22:17:23.000000000 -0400
10061+++ linux-3.0.4/arch/x86/Kconfig 2011-08-23 21:48:14.000000000 -0400
10062@@ -229,7 +229,7 @@ config X86_HT
10063
10064 config X86_32_LAZY_GS
10065 def_bool y
10066- depends on X86_32 && !CC_STACKPROTECTOR
10067+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10068
10069 config ARCH_HWEIGHT_CFLAGS
10070 string
10071@@ -1018,7 +1018,7 @@ choice
10072
10073 config NOHIGHMEM
10074 bool "off"
10075- depends on !X86_NUMAQ
10076+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10077 ---help---
10078 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10079 However, the address space of 32-bit x86 processors is only 4
10080@@ -1055,7 +1055,7 @@ config NOHIGHMEM
10081
10082 config HIGHMEM4G
10083 bool "4GB"
10084- depends on !X86_NUMAQ
10085+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10086 ---help---
10087 Select this if you have a 32-bit processor and between 1 and 4
10088 gigabytes of physical RAM.
10089@@ -1109,7 +1109,7 @@ config PAGE_OFFSET
10090 hex
10091 default 0xB0000000 if VMSPLIT_3G_OPT
10092 default 0x80000000 if VMSPLIT_2G
10093- default 0x78000000 if VMSPLIT_2G_OPT
10094+ default 0x70000000 if VMSPLIT_2G_OPT
10095 default 0x40000000 if VMSPLIT_1G
10096 default 0xC0000000
10097 depends on X86_32
10098@@ -1453,7 +1453,7 @@ config ARCH_USES_PG_UNCACHED
10099
10100 config EFI
10101 bool "EFI runtime service support"
10102- depends on ACPI
10103+ depends on ACPI && !PAX_KERNEXEC
10104 ---help---
10105 This enables the kernel to use EFI runtime services that are
10106 available (such as the EFI variable services).
10107@@ -1483,6 +1483,7 @@ config SECCOMP
10108
10109 config CC_STACKPROTECTOR
10110 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
10111+ depends on X86_64 || !PAX_MEMORY_UDEREF
10112 ---help---
10113 This option turns on the -fstack-protector GCC feature. This
10114 feature puts, at the beginning of functions, a canary value on
10115@@ -1540,6 +1541,7 @@ config KEXEC_JUMP
10116 config PHYSICAL_START
10117 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
10118 default "0x1000000"
10119+ range 0x400000 0x40000000
10120 ---help---
10121 This gives the physical address where the kernel is loaded.
10122
10123@@ -1603,6 +1605,7 @@ config X86_NEED_RELOCS
10124 config PHYSICAL_ALIGN
10125 hex "Alignment value to which kernel should be aligned" if X86_32
10126 default "0x1000000"
10127+ range 0x400000 0x1000000 if PAX_KERNEXEC
10128 range 0x2000 0x1000000
10129 ---help---
10130 This value puts the alignment restrictions on physical address
10131@@ -1634,9 +1637,10 @@ config HOTPLUG_CPU
10132 Say N if you want to disable CPU hotplug.
10133
10134 config COMPAT_VDSO
10135- def_bool y
10136+ def_bool n
10137 prompt "Compat VDSO support"
10138 depends on X86_32 || IA32_EMULATION
10139+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
10140 ---help---
10141 Map the 32-bit VDSO to the predictable old-style address too.
10142
10143diff -urNp linux-3.0.4/arch/x86/Kconfig.cpu linux-3.0.4/arch/x86/Kconfig.cpu
10144--- linux-3.0.4/arch/x86/Kconfig.cpu 2011-07-21 22:17:23.000000000 -0400
10145+++ linux-3.0.4/arch/x86/Kconfig.cpu 2011-08-23 21:47:55.000000000 -0400
10146@@ -338,7 +338,7 @@ config X86_PPRO_FENCE
10147
10148 config X86_F00F_BUG
10149 def_bool y
10150- depends on M586MMX || M586TSC || M586 || M486 || M386
10151+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
10152
10153 config X86_INVD_BUG
10154 def_bool y
10155@@ -362,7 +362,7 @@ config X86_POPAD_OK
10156
10157 config X86_ALIGNMENT_16
10158 def_bool y
10159- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10160+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10161
10162 config X86_INTEL_USERCOPY
10163 def_bool y
10164@@ -408,7 +408,7 @@ config X86_CMPXCHG64
10165 # generates cmov.
10166 config X86_CMOV
10167 def_bool y
10168- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10169+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10170
10171 config X86_MINIMUM_CPU_FAMILY
10172 int
10173diff -urNp linux-3.0.4/arch/x86/Kconfig.debug linux-3.0.4/arch/x86/Kconfig.debug
10174--- linux-3.0.4/arch/x86/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
10175+++ linux-3.0.4/arch/x86/Kconfig.debug 2011-08-23 21:47:55.000000000 -0400
10176@@ -81,7 +81,7 @@ config X86_PTDUMP
10177 config DEBUG_RODATA
10178 bool "Write protect kernel read-only data structures"
10179 default y
10180- depends on DEBUG_KERNEL
10181+ depends on DEBUG_KERNEL && BROKEN
10182 ---help---
10183 Mark the kernel read-only data as write-protected in the pagetables,
10184 in order to catch accidental (and incorrect) writes to such const
10185@@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
10186
10187 config DEBUG_SET_MODULE_RONX
10188 bool "Set loadable kernel module data as NX and text as RO"
10189- depends on MODULES
10190+ depends on MODULES && BROKEN
10191 ---help---
10192 This option helps catch unintended modifications to loadable
10193 kernel module's text and read-only data. It also prevents execution
10194diff -urNp linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile
10195--- linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile 2011-07-21 22:17:23.000000000 -0400
10196+++ linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile 2011-08-23 21:47:55.000000000 -0400
10197@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
10198 $(call cc-option, -fno-stack-protector) \
10199 $(call cc-option, -mpreferred-stack-boundary=2)
10200 KBUILD_CFLAGS += $(call cc-option, -m32)
10201+ifdef CONSTIFY_PLUGIN
10202+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
10203+endif
10204 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10205 GCOV_PROFILE := n
10206
10207diff -urNp linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S
10208--- linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-21 22:17:23.000000000 -0400
10209+++ linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S 2011-08-23 21:48:14.000000000 -0400
10210@@ -108,6 +108,9 @@ wakeup_code:
10211 /* Do any other stuff... */
10212
10213 #ifndef CONFIG_64BIT
10214+ /* Recheck NX bit overrides (64bit path does this in trampoline */
10215+ call verify_cpu
10216+
10217 /* This could also be done in C code... */
10218 movl pmode_cr3, %eax
10219 movl %eax, %cr3
10220@@ -131,6 +134,7 @@ wakeup_code:
10221 movl pmode_cr0, %eax
10222 movl %eax, %cr0
10223 jmp pmode_return
10224+# include "../../verify_cpu.S"
10225 #else
10226 pushw $0
10227 pushw trampoline_segment
10228diff -urNp linux-3.0.4/arch/x86/kernel/acpi/sleep.c linux-3.0.4/arch/x86/kernel/acpi/sleep.c
10229--- linux-3.0.4/arch/x86/kernel/acpi/sleep.c 2011-07-21 22:17:23.000000000 -0400
10230+++ linux-3.0.4/arch/x86/kernel/acpi/sleep.c 2011-08-23 21:47:55.000000000 -0400
10231@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
10232 header->trampoline_segment = trampoline_address() >> 4;
10233 #ifdef CONFIG_SMP
10234 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
10235+
10236+ pax_open_kernel();
10237 early_gdt_descr.address =
10238 (unsigned long)get_cpu_gdt_table(smp_processor_id());
10239+ pax_close_kernel();
10240+
10241 initial_gs = per_cpu_offset(smp_processor_id());
10242 #endif
10243 initial_code = (unsigned long)wakeup_long64;
10244diff -urNp linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S
10245--- linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S 2011-07-21 22:17:23.000000000 -0400
10246+++ linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S 2011-08-23 21:47:55.000000000 -0400
10247@@ -30,13 +30,11 @@ wakeup_pmode_return:
10248 # and restore the stack ... but you need gdt for this to work
10249 movl saved_context_esp, %esp
10250
10251- movl %cs:saved_magic, %eax
10252- cmpl $0x12345678, %eax
10253+ cmpl $0x12345678, saved_magic
10254 jne bogus_magic
10255
10256 # jump to place where we left off
10257- movl saved_eip, %eax
10258- jmp *%eax
10259+ jmp *(saved_eip)
10260
10261 bogus_magic:
10262 jmp bogus_magic
10263diff -urNp linux-3.0.4/arch/x86/kernel/alternative.c linux-3.0.4/arch/x86/kernel/alternative.c
10264--- linux-3.0.4/arch/x86/kernel/alternative.c 2011-07-21 22:17:23.000000000 -0400
10265+++ linux-3.0.4/arch/x86/kernel/alternative.c 2011-08-23 21:47:55.000000000 -0400
10266@@ -313,7 +313,7 @@ static void alternatives_smp_lock(const
10267 if (!*poff || ptr < text || ptr >= text_end)
10268 continue;
10269 /* turn DS segment override prefix into lock prefix */
10270- if (*ptr == 0x3e)
10271+ if (*ktla_ktva(ptr) == 0x3e)
10272 text_poke(ptr, ((unsigned char []){0xf0}), 1);
10273 };
10274 mutex_unlock(&text_mutex);
10275@@ -334,7 +334,7 @@ static void alternatives_smp_unlock(cons
10276 if (!*poff || ptr < text || ptr >= text_end)
10277 continue;
10278 /* turn lock prefix into DS segment override prefix */
10279- if (*ptr == 0xf0)
10280+ if (*ktla_ktva(ptr) == 0xf0)
10281 text_poke(ptr, ((unsigned char []){0x3E}), 1);
10282 };
10283 mutex_unlock(&text_mutex);
10284@@ -503,7 +503,7 @@ void __init_or_module apply_paravirt(str
10285
10286 BUG_ON(p->len > MAX_PATCH_LEN);
10287 /* prep the buffer with the original instructions */
10288- memcpy(insnbuf, p->instr, p->len);
10289+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
10290 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
10291 (unsigned long)p->instr, p->len);
10292
10293@@ -571,7 +571,7 @@ void __init alternative_instructions(voi
10294 if (smp_alt_once)
10295 free_init_pages("SMP alternatives",
10296 (unsigned long)__smp_locks,
10297- (unsigned long)__smp_locks_end);
10298+ PAGE_ALIGN((unsigned long)__smp_locks_end));
10299
10300 restart_nmi();
10301 }
10302@@ -588,13 +588,17 @@ void __init alternative_instructions(voi
10303 * instructions. And on the local CPU you need to be protected again NMI or MCE
10304 * handlers seeing an inconsistent instruction while you patch.
10305 */
10306-void *__init_or_module text_poke_early(void *addr, const void *opcode,
10307+void *__kprobes text_poke_early(void *addr, const void *opcode,
10308 size_t len)
10309 {
10310 unsigned long flags;
10311 local_irq_save(flags);
10312- memcpy(addr, opcode, len);
10313+
10314+ pax_open_kernel();
10315+ memcpy(ktla_ktva(addr), opcode, len);
10316 sync_core();
10317+ pax_close_kernel();
10318+
10319 local_irq_restore(flags);
10320 /* Could also do a CLFLUSH here to speed up CPU recovery; but
10321 that causes hangs on some VIA CPUs. */
10322@@ -616,36 +620,22 @@ void *__init_or_module text_poke_early(v
10323 */
10324 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
10325 {
10326- unsigned long flags;
10327- char *vaddr;
10328+ unsigned char *vaddr = ktla_ktva(addr);
10329 struct page *pages[2];
10330- int i;
10331+ size_t i;
10332
10333 if (!core_kernel_text((unsigned long)addr)) {
10334- pages[0] = vmalloc_to_page(addr);
10335- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
10336+ pages[0] = vmalloc_to_page(vaddr);
10337+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
10338 } else {
10339- pages[0] = virt_to_page(addr);
10340+ pages[0] = virt_to_page(vaddr);
10341 WARN_ON(!PageReserved(pages[0]));
10342- pages[1] = virt_to_page(addr + PAGE_SIZE);
10343+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
10344 }
10345 BUG_ON(!pages[0]);
10346- local_irq_save(flags);
10347- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
10348- if (pages[1])
10349- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
10350- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
10351- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
10352- clear_fixmap(FIX_TEXT_POKE0);
10353- if (pages[1])
10354- clear_fixmap(FIX_TEXT_POKE1);
10355- local_flush_tlb();
10356- sync_core();
10357- /* Could also do a CLFLUSH here to speed up CPU recovery; but
10358- that causes hangs on some VIA CPUs. */
10359+ text_poke_early(addr, opcode, len);
10360 for (i = 0; i < len; i++)
10361- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
10362- local_irq_restore(flags);
10363+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
10364 return addr;
10365 }
10366
10367diff -urNp linux-3.0.4/arch/x86/kernel/apic/apic.c linux-3.0.4/arch/x86/kernel/apic/apic.c
10368--- linux-3.0.4/arch/x86/kernel/apic/apic.c 2011-07-21 22:17:23.000000000 -0400
10369+++ linux-3.0.4/arch/x86/kernel/apic/apic.c 2011-08-23 21:48:14.000000000 -0400
10370@@ -173,7 +173,7 @@ int first_system_vector = 0xfe;
10371 /*
10372 * Debug level, exported for io_apic.c
10373 */
10374-unsigned int apic_verbosity;
10375+int apic_verbosity;
10376
10377 int pic_mode;
10378
10379@@ -1834,7 +1834,7 @@ void smp_error_interrupt(struct pt_regs
10380 apic_write(APIC_ESR, 0);
10381 v1 = apic_read(APIC_ESR);
10382 ack_APIC_irq();
10383- atomic_inc(&irq_err_count);
10384+ atomic_inc_unchecked(&irq_err_count);
10385
10386 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
10387 smp_processor_id(), v0 , v1);
10388@@ -2190,6 +2190,8 @@ static int __cpuinit apic_cluster_num(vo
10389 u16 *bios_cpu_apicid;
10390 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
10391
10392+ pax_track_stack();
10393+
10394 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
10395 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
10396
10397diff -urNp linux-3.0.4/arch/x86/kernel/apic/io_apic.c linux-3.0.4/arch/x86/kernel/apic/io_apic.c
10398--- linux-3.0.4/arch/x86/kernel/apic/io_apic.c 2011-07-21 22:17:23.000000000 -0400
10399+++ linux-3.0.4/arch/x86/kernel/apic/io_apic.c 2011-08-23 21:47:55.000000000 -0400
10400@@ -1028,7 +1028,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
10401 }
10402 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
10403
10404-void lock_vector_lock(void)
10405+void lock_vector_lock(void) __acquires(vector_lock)
10406 {
10407 /* Used to the online set of cpus does not change
10408 * during assign_irq_vector.
10409@@ -1036,7 +1036,7 @@ void lock_vector_lock(void)
10410 raw_spin_lock(&vector_lock);
10411 }
10412
10413-void unlock_vector_lock(void)
10414+void unlock_vector_lock(void) __releases(vector_lock)
10415 {
10416 raw_spin_unlock(&vector_lock);
10417 }
10418@@ -2364,7 +2364,7 @@ static void ack_apic_edge(struct irq_dat
10419 ack_APIC_irq();
10420 }
10421
10422-atomic_t irq_mis_count;
10423+atomic_unchecked_t irq_mis_count;
10424
10425 /*
10426 * IO-APIC versions below 0x20 don't support EOI register.
10427@@ -2472,7 +2472,7 @@ static void ack_apic_level(struct irq_da
10428 * at the cpu.
10429 */
10430 if (!(v & (1 << (i & 0x1f)))) {
10431- atomic_inc(&irq_mis_count);
10432+ atomic_inc_unchecked(&irq_mis_count);
10433
10434 eoi_ioapic_irq(irq, cfg);
10435 }
10436diff -urNp linux-3.0.4/arch/x86/kernel/apm_32.c linux-3.0.4/arch/x86/kernel/apm_32.c
10437--- linux-3.0.4/arch/x86/kernel/apm_32.c 2011-07-21 22:17:23.000000000 -0400
10438+++ linux-3.0.4/arch/x86/kernel/apm_32.c 2011-08-23 21:47:55.000000000 -0400
10439@@ -413,7 +413,7 @@ static DEFINE_MUTEX(apm_mutex);
10440 * This is for buggy BIOS's that refer to (real mode) segment 0x40
10441 * even though they are called in protected mode.
10442 */
10443-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
10444+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
10445 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
10446
10447 static const char driver_version[] = "1.16ac"; /* no spaces */
10448@@ -591,7 +591,10 @@ static long __apm_bios_call(void *_call)
10449 BUG_ON(cpu != 0);
10450 gdt = get_cpu_gdt_table(cpu);
10451 save_desc_40 = gdt[0x40 / 8];
10452+
10453+ pax_open_kernel();
10454 gdt[0x40 / 8] = bad_bios_desc;
10455+ pax_close_kernel();
10456
10457 apm_irq_save(flags);
10458 APM_DO_SAVE_SEGS;
10459@@ -600,7 +603,11 @@ static long __apm_bios_call(void *_call)
10460 &call->esi);
10461 APM_DO_RESTORE_SEGS;
10462 apm_irq_restore(flags);
10463+
10464+ pax_open_kernel();
10465 gdt[0x40 / 8] = save_desc_40;
10466+ pax_close_kernel();
10467+
10468 put_cpu();
10469
10470 return call->eax & 0xff;
10471@@ -667,7 +674,10 @@ static long __apm_bios_call_simple(void
10472 BUG_ON(cpu != 0);
10473 gdt = get_cpu_gdt_table(cpu);
10474 save_desc_40 = gdt[0x40 / 8];
10475+
10476+ pax_open_kernel();
10477 gdt[0x40 / 8] = bad_bios_desc;
10478+ pax_close_kernel();
10479
10480 apm_irq_save(flags);
10481 APM_DO_SAVE_SEGS;
10482@@ -675,7 +685,11 @@ static long __apm_bios_call_simple(void
10483 &call->eax);
10484 APM_DO_RESTORE_SEGS;
10485 apm_irq_restore(flags);
10486+
10487+ pax_open_kernel();
10488 gdt[0x40 / 8] = save_desc_40;
10489+ pax_close_kernel();
10490+
10491 put_cpu();
10492 return error;
10493 }
10494@@ -2349,12 +2363,15 @@ static int __init apm_init(void)
10495 * code to that CPU.
10496 */
10497 gdt = get_cpu_gdt_table(0);
10498+
10499+ pax_open_kernel();
10500 set_desc_base(&gdt[APM_CS >> 3],
10501 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
10502 set_desc_base(&gdt[APM_CS_16 >> 3],
10503 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
10504 set_desc_base(&gdt[APM_DS >> 3],
10505 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
10506+ pax_close_kernel();
10507
10508 proc_create("apm", 0, NULL, &apm_file_ops);
10509
10510diff -urNp linux-3.0.4/arch/x86/kernel/asm-offsets_64.c linux-3.0.4/arch/x86/kernel/asm-offsets_64.c
10511--- linux-3.0.4/arch/x86/kernel/asm-offsets_64.c 2011-07-21 22:17:23.000000000 -0400
10512+++ linux-3.0.4/arch/x86/kernel/asm-offsets_64.c 2011-08-23 21:47:55.000000000 -0400
10513@@ -69,6 +69,7 @@ int main(void)
10514 BLANK();
10515 #undef ENTRY
10516
10517+ DEFINE(TSS_size, sizeof(struct tss_struct));
10518 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
10519 BLANK();
10520
10521diff -urNp linux-3.0.4/arch/x86/kernel/asm-offsets.c linux-3.0.4/arch/x86/kernel/asm-offsets.c
10522--- linux-3.0.4/arch/x86/kernel/asm-offsets.c 2011-07-21 22:17:23.000000000 -0400
10523+++ linux-3.0.4/arch/x86/kernel/asm-offsets.c 2011-08-23 21:47:55.000000000 -0400
10524@@ -33,6 +33,8 @@ void common(void) {
10525 OFFSET(TI_status, thread_info, status);
10526 OFFSET(TI_addr_limit, thread_info, addr_limit);
10527 OFFSET(TI_preempt_count, thread_info, preempt_count);
10528+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
10529+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
10530
10531 BLANK();
10532 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
10533@@ -53,8 +55,26 @@ void common(void) {
10534 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
10535 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
10536 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
10537+
10538+#ifdef CONFIG_PAX_KERNEXEC
10539+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
10540+#endif
10541+
10542+#ifdef CONFIG_PAX_MEMORY_UDEREF
10543+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
10544+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
10545+#ifdef CONFIG_X86_64
10546+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
10547+#endif
10548 #endif
10549
10550+#endif
10551+
10552+ BLANK();
10553+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
10554+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
10555+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
10556+
10557 #ifdef CONFIG_XEN
10558 BLANK();
10559 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
10560diff -urNp linux-3.0.4/arch/x86/kernel/cpu/amd.c linux-3.0.4/arch/x86/kernel/cpu/amd.c
10561--- linux-3.0.4/arch/x86/kernel/cpu/amd.c 2011-07-21 22:17:23.000000000 -0400
10562+++ linux-3.0.4/arch/x86/kernel/cpu/amd.c 2011-08-23 21:47:55.000000000 -0400
10563@@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_c
10564 unsigned int size)
10565 {
10566 /* AMD errata T13 (order #21922) */
10567- if ((c->x86 == 6)) {
10568+ if (c->x86 == 6) {
10569 /* Duron Rev A0 */
10570 if (c->x86_model == 3 && c->x86_mask == 0)
10571 size = 64;
10572diff -urNp linux-3.0.4/arch/x86/kernel/cpu/common.c linux-3.0.4/arch/x86/kernel/cpu/common.c
10573--- linux-3.0.4/arch/x86/kernel/cpu/common.c 2011-07-21 22:17:23.000000000 -0400
10574+++ linux-3.0.4/arch/x86/kernel/cpu/common.c 2011-08-23 21:47:55.000000000 -0400
10575@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
10576
10577 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
10578
10579-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
10580-#ifdef CONFIG_X86_64
10581- /*
10582- * We need valid kernel segments for data and code in long mode too
10583- * IRET will check the segment types kkeil 2000/10/28
10584- * Also sysret mandates a special GDT layout
10585- *
10586- * TLS descriptors are currently at a different place compared to i386.
10587- * Hopefully nobody expects them at a fixed place (Wine?)
10588- */
10589- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
10590- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
10591- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
10592- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
10593- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
10594- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
10595-#else
10596- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
10597- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10598- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
10599- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
10600- /*
10601- * Segments used for calling PnP BIOS have byte granularity.
10602- * They code segments and data segments have fixed 64k limits,
10603- * the transfer segment sizes are set at run time.
10604- */
10605- /* 32-bit code */
10606- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10607- /* 16-bit code */
10608- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10609- /* 16-bit data */
10610- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
10611- /* 16-bit data */
10612- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
10613- /* 16-bit data */
10614- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
10615- /*
10616- * The APM segments have byte granularity and their bases
10617- * are set at run time. All have 64k limits.
10618- */
10619- /* 32-bit code */
10620- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10621- /* 16-bit code */
10622- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10623- /* data */
10624- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
10625-
10626- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10627- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10628- GDT_STACK_CANARY_INIT
10629-#endif
10630-} };
10631-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
10632-
10633 static int __init x86_xsave_setup(char *s)
10634 {
10635 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
10636@@ -371,7 +317,7 @@ void switch_to_new_gdt(int cpu)
10637 {
10638 struct desc_ptr gdt_descr;
10639
10640- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
10641+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
10642 gdt_descr.size = GDT_SIZE - 1;
10643 load_gdt(&gdt_descr);
10644 /* Reload the per-cpu base */
10645@@ -840,6 +786,10 @@ static void __cpuinit identify_cpu(struc
10646 /* Filter out anything that depends on CPUID levels we don't have */
10647 filter_cpuid_features(c, true);
10648
10649+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
10650+ setup_clear_cpu_cap(X86_FEATURE_SEP);
10651+#endif
10652+
10653 /* If the model name is still unset, do table lookup. */
10654 if (!c->x86_model_id[0]) {
10655 const char *p;
10656@@ -1019,6 +969,9 @@ static __init int setup_disablecpuid(cha
10657 }
10658 __setup("clearcpuid=", setup_disablecpuid);
10659
10660+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
10661+EXPORT_PER_CPU_SYMBOL(current_tinfo);
10662+
10663 #ifdef CONFIG_X86_64
10664 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
10665
10666@@ -1034,7 +987,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
10667 EXPORT_PER_CPU_SYMBOL(current_task);
10668
10669 DEFINE_PER_CPU(unsigned long, kernel_stack) =
10670- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
10671+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
10672 EXPORT_PER_CPU_SYMBOL(kernel_stack);
10673
10674 DEFINE_PER_CPU(char *, irq_stack_ptr) =
10675@@ -1099,7 +1052,7 @@ struct pt_regs * __cpuinit idle_regs(str
10676 {
10677 memset(regs, 0, sizeof(struct pt_regs));
10678 regs->fs = __KERNEL_PERCPU;
10679- regs->gs = __KERNEL_STACK_CANARY;
10680+ savesegment(gs, regs->gs);
10681
10682 return regs;
10683 }
10684@@ -1154,7 +1107,7 @@ void __cpuinit cpu_init(void)
10685 int i;
10686
10687 cpu = stack_smp_processor_id();
10688- t = &per_cpu(init_tss, cpu);
10689+ t = init_tss + cpu;
10690 oist = &per_cpu(orig_ist, cpu);
10691
10692 #ifdef CONFIG_NUMA
10693@@ -1180,7 +1133,7 @@ void __cpuinit cpu_init(void)
10694 switch_to_new_gdt(cpu);
10695 loadsegment(fs, 0);
10696
10697- load_idt((const struct desc_ptr *)&idt_descr);
10698+ load_idt(&idt_descr);
10699
10700 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
10701 syscall_init();
10702@@ -1189,7 +1142,6 @@ void __cpuinit cpu_init(void)
10703 wrmsrl(MSR_KERNEL_GS_BASE, 0);
10704 barrier();
10705
10706- x86_configure_nx();
10707 if (cpu != 0)
10708 enable_x2apic();
10709
10710@@ -1243,7 +1195,7 @@ void __cpuinit cpu_init(void)
10711 {
10712 int cpu = smp_processor_id();
10713 struct task_struct *curr = current;
10714- struct tss_struct *t = &per_cpu(init_tss, cpu);
10715+ struct tss_struct *t = init_tss + cpu;
10716 struct thread_struct *thread = &curr->thread;
10717
10718 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
10719diff -urNp linux-3.0.4/arch/x86/kernel/cpu/intel.c linux-3.0.4/arch/x86/kernel/cpu/intel.c
10720--- linux-3.0.4/arch/x86/kernel/cpu/intel.c 2011-08-29 23:26:13.000000000 -0400
10721+++ linux-3.0.4/arch/x86/kernel/cpu/intel.c 2011-08-29 23:30:14.000000000 -0400
10722@@ -172,7 +172,7 @@ static void __cpuinit trap_init_f00f_bug
10723 * Update the IDT descriptor and reload the IDT so that
10724 * it uses the read-only mapped virtual address.
10725 */
10726- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
10727+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
10728 load_idt(&idt_descr);
10729 }
10730 #endif
10731diff -urNp linux-3.0.4/arch/x86/kernel/cpu/Makefile linux-3.0.4/arch/x86/kernel/cpu/Makefile
10732--- linux-3.0.4/arch/x86/kernel/cpu/Makefile 2011-07-21 22:17:23.000000000 -0400
10733+++ linux-3.0.4/arch/x86/kernel/cpu/Makefile 2011-08-23 21:47:55.000000000 -0400
10734@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
10735 CFLAGS_REMOVE_perf_event.o = -pg
10736 endif
10737
10738-# Make sure load_percpu_segment has no stackprotector
10739-nostackp := $(call cc-option, -fno-stack-protector)
10740-CFLAGS_common.o := $(nostackp)
10741-
10742 obj-y := intel_cacheinfo.o scattered.o topology.o
10743 obj-y += proc.o capflags.o powerflags.o common.o
10744 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
10745diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c
10746--- linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c 2011-07-21 22:17:23.000000000 -0400
10747+++ linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c 2011-08-23 21:47:55.000000000 -0400
10748@@ -46,6 +46,7 @@
10749 #include <asm/ipi.h>
10750 #include <asm/mce.h>
10751 #include <asm/msr.h>
10752+#include <asm/local.h>
10753
10754 #include "mce-internal.h"
10755
10756@@ -208,7 +209,7 @@ static void print_mce(struct mce *m)
10757 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
10758 m->cs, m->ip);
10759
10760- if (m->cs == __KERNEL_CS)
10761+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
10762 print_symbol("{%s}", m->ip);
10763 pr_cont("\n");
10764 }
10765@@ -236,10 +237,10 @@ static void print_mce(struct mce *m)
10766
10767 #define PANIC_TIMEOUT 5 /* 5 seconds */
10768
10769-static atomic_t mce_paniced;
10770+static atomic_unchecked_t mce_paniced;
10771
10772 static int fake_panic;
10773-static atomic_t mce_fake_paniced;
10774+static atomic_unchecked_t mce_fake_paniced;
10775
10776 /* Panic in progress. Enable interrupts and wait for final IPI */
10777 static void wait_for_panic(void)
10778@@ -263,7 +264,7 @@ static void mce_panic(char *msg, struct
10779 /*
10780 * Make sure only one CPU runs in machine check panic
10781 */
10782- if (atomic_inc_return(&mce_paniced) > 1)
10783+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
10784 wait_for_panic();
10785 barrier();
10786
10787@@ -271,7 +272,7 @@ static void mce_panic(char *msg, struct
10788 console_verbose();
10789 } else {
10790 /* Don't log too much for fake panic */
10791- if (atomic_inc_return(&mce_fake_paniced) > 1)
10792+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
10793 return;
10794 }
10795 /* First print corrected ones that are still unlogged */
10796@@ -638,7 +639,7 @@ static int mce_timed_out(u64 *t)
10797 * might have been modified by someone else.
10798 */
10799 rmb();
10800- if (atomic_read(&mce_paniced))
10801+ if (atomic_read_unchecked(&mce_paniced))
10802 wait_for_panic();
10803 if (!monarch_timeout)
10804 goto out;
10805@@ -1452,14 +1453,14 @@ void __cpuinit mcheck_cpu_init(struct cp
10806 */
10807
10808 static DEFINE_SPINLOCK(mce_state_lock);
10809-static int open_count; /* #times opened */
10810+static local_t open_count; /* #times opened */
10811 static int open_exclu; /* already open exclusive? */
10812
10813 static int mce_open(struct inode *inode, struct file *file)
10814 {
10815 spin_lock(&mce_state_lock);
10816
10817- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
10818+ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
10819 spin_unlock(&mce_state_lock);
10820
10821 return -EBUSY;
10822@@ -1467,7 +1468,7 @@ static int mce_open(struct inode *inode,
10823
10824 if (file->f_flags & O_EXCL)
10825 open_exclu = 1;
10826- open_count++;
10827+ local_inc(&open_count);
10828
10829 spin_unlock(&mce_state_lock);
10830
10831@@ -1478,7 +1479,7 @@ static int mce_release(struct inode *ino
10832 {
10833 spin_lock(&mce_state_lock);
10834
10835- open_count--;
10836+ local_dec(&open_count);
10837 open_exclu = 0;
10838
10839 spin_unlock(&mce_state_lock);
10840@@ -2163,7 +2164,7 @@ struct dentry *mce_get_debugfs_dir(void)
10841 static void mce_reset(void)
10842 {
10843 cpu_missing = 0;
10844- atomic_set(&mce_fake_paniced, 0);
10845+ atomic_set_unchecked(&mce_fake_paniced, 0);
10846 atomic_set(&mce_executing, 0);
10847 atomic_set(&mce_callin, 0);
10848 atomic_set(&global_nwo, 0);
10849diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c
10850--- linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-07-21 22:17:23.000000000 -0400
10851+++ linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-23 21:47:55.000000000 -0400
10852@@ -215,7 +215,9 @@ static int inject_init(void)
10853 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
10854 return -ENOMEM;
10855 printk(KERN_INFO "Machine check injector initialized\n");
10856- mce_chrdev_ops.write = mce_write;
10857+ pax_open_kernel();
10858+ *(void **)&mce_chrdev_ops.write = mce_write;
10859+ pax_close_kernel();
10860 register_die_notifier(&mce_raise_nb);
10861 return 0;
10862 }
10863diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c
10864--- linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c 2011-08-29 23:26:13.000000000 -0400
10865+++ linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c 2011-08-29 23:26:21.000000000 -0400
10866@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
10867 u64 size_or_mask, size_and_mask;
10868 static bool mtrr_aps_delayed_init;
10869
10870-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
10871+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
10872
10873 const struct mtrr_ops *mtrr_if;
10874
10875diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h
10876--- linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-07-21 22:17:23.000000000 -0400
10877+++ linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-08-26 19:49:56.000000000 -0400
10878@@ -25,7 +25,7 @@ struct mtrr_ops {
10879 int (*validate_add_page)(unsigned long base, unsigned long size,
10880 unsigned int type);
10881 int (*have_wrcomb)(void);
10882-};
10883+} __do_const;
10884
10885 extern int generic_get_free_region(unsigned long base, unsigned long size,
10886 int replace_reg);
10887diff -urNp linux-3.0.4/arch/x86/kernel/cpu/perf_event.c linux-3.0.4/arch/x86/kernel/cpu/perf_event.c
10888--- linux-3.0.4/arch/x86/kernel/cpu/perf_event.c 2011-07-21 22:17:23.000000000 -0400
10889+++ linux-3.0.4/arch/x86/kernel/cpu/perf_event.c 2011-08-23 21:48:14.000000000 -0400
10890@@ -781,6 +781,8 @@ static int x86_schedule_events(struct cp
10891 int i, j, w, wmax, num = 0;
10892 struct hw_perf_event *hwc;
10893
10894+ pax_track_stack();
10895+
10896 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
10897
10898 for (i = 0; i < n; i++) {
10899@@ -1872,7 +1874,7 @@ perf_callchain_user(struct perf_callchai
10900 break;
10901
10902 perf_callchain_store(entry, frame.return_address);
10903- fp = frame.next_frame;
10904+ fp = (__force const void __user *)frame.next_frame;
10905 }
10906 }
10907
10908diff -urNp linux-3.0.4/arch/x86/kernel/crash.c linux-3.0.4/arch/x86/kernel/crash.c
10909--- linux-3.0.4/arch/x86/kernel/crash.c 2011-07-21 22:17:23.000000000 -0400
10910+++ linux-3.0.4/arch/x86/kernel/crash.c 2011-08-23 21:47:55.000000000 -0400
10911@@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu,
10912 regs = args->regs;
10913
10914 #ifdef CONFIG_X86_32
10915- if (!user_mode_vm(regs)) {
10916+ if (!user_mode(regs)) {
10917 crash_fixup_ss_esp(&fixed_regs, regs);
10918 regs = &fixed_regs;
10919 }
10920diff -urNp linux-3.0.4/arch/x86/kernel/doublefault_32.c linux-3.0.4/arch/x86/kernel/doublefault_32.c
10921--- linux-3.0.4/arch/x86/kernel/doublefault_32.c 2011-07-21 22:17:23.000000000 -0400
10922+++ linux-3.0.4/arch/x86/kernel/doublefault_32.c 2011-08-23 21:47:55.000000000 -0400
10923@@ -11,7 +11,7 @@
10924
10925 #define DOUBLEFAULT_STACKSIZE (1024)
10926 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
10927-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
10928+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
10929
10930 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
10931
10932@@ -21,7 +21,7 @@ static void doublefault_fn(void)
10933 unsigned long gdt, tss;
10934
10935 store_gdt(&gdt_desc);
10936- gdt = gdt_desc.address;
10937+ gdt = (unsigned long)gdt_desc.address;
10938
10939 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
10940
10941@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
10942 /* 0x2 bit is always set */
10943 .flags = X86_EFLAGS_SF | 0x2,
10944 .sp = STACK_START,
10945- .es = __USER_DS,
10946+ .es = __KERNEL_DS,
10947 .cs = __KERNEL_CS,
10948 .ss = __KERNEL_DS,
10949- .ds = __USER_DS,
10950+ .ds = __KERNEL_DS,
10951 .fs = __KERNEL_PERCPU,
10952
10953 .__cr3 = __pa_nodebug(swapper_pg_dir),
10954diff -urNp linux-3.0.4/arch/x86/kernel/dumpstack_32.c linux-3.0.4/arch/x86/kernel/dumpstack_32.c
10955--- linux-3.0.4/arch/x86/kernel/dumpstack_32.c 2011-07-21 22:17:23.000000000 -0400
10956+++ linux-3.0.4/arch/x86/kernel/dumpstack_32.c 2011-08-23 21:47:55.000000000 -0400
10957@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task
10958 bp = stack_frame(task, regs);
10959
10960 for (;;) {
10961- struct thread_info *context;
10962+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
10963
10964- context = (struct thread_info *)
10965- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
10966- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
10967+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
10968
10969- stack = (unsigned long *)context->previous_esp;
10970- if (!stack)
10971+ if (stack_start == task_stack_page(task))
10972 break;
10973+ stack = *(unsigned long **)stack_start;
10974 if (ops->stack(data, "IRQ") < 0)
10975 break;
10976 touch_nmi_watchdog();
10977@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs
10978 * When in-kernel, we also print out the stack and code at the
10979 * time of the fault..
10980 */
10981- if (!user_mode_vm(regs)) {
10982+ if (!user_mode(regs)) {
10983 unsigned int code_prologue = code_bytes * 43 / 64;
10984 unsigned int code_len = code_bytes;
10985 unsigned char c;
10986 u8 *ip;
10987+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
10988
10989 printk(KERN_EMERG "Stack:\n");
10990 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
10991
10992 printk(KERN_EMERG "Code: ");
10993
10994- ip = (u8 *)regs->ip - code_prologue;
10995+ ip = (u8 *)regs->ip - code_prologue + cs_base;
10996 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
10997 /* try starting at IP */
10998- ip = (u8 *)regs->ip;
10999+ ip = (u8 *)regs->ip + cs_base;
11000 code_len = code_len - code_prologue + 1;
11001 }
11002 for (i = 0; i < code_len; i++, ip++) {
11003@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs
11004 printk(" Bad EIP value.");
11005 break;
11006 }
11007- if (ip == (u8 *)regs->ip)
11008+ if (ip == (u8 *)regs->ip + cs_base)
11009 printk("<%02x> ", c);
11010 else
11011 printk("%02x ", c);
11012@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
11013 {
11014 unsigned short ud2;
11015
11016+ ip = ktla_ktva(ip);
11017 if (ip < PAGE_OFFSET)
11018 return 0;
11019 if (probe_kernel_address((unsigned short *)ip, ud2))
11020diff -urNp linux-3.0.4/arch/x86/kernel/dumpstack_64.c linux-3.0.4/arch/x86/kernel/dumpstack_64.c
11021--- linux-3.0.4/arch/x86/kernel/dumpstack_64.c 2011-07-21 22:17:23.000000000 -0400
11022+++ linux-3.0.4/arch/x86/kernel/dumpstack_64.c 2011-08-23 21:47:55.000000000 -0400
11023@@ -147,9 +147,9 @@ void dump_trace(struct task_struct *task
11024 unsigned long *irq_stack_end =
11025 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
11026 unsigned used = 0;
11027- struct thread_info *tinfo;
11028 int graph = 0;
11029 unsigned long dummy;
11030+ void *stack_start;
11031
11032 if (!task)
11033 task = current;
11034@@ -167,10 +167,10 @@ void dump_trace(struct task_struct *task
11035 * current stack address. If the stacks consist of nested
11036 * exceptions
11037 */
11038- tinfo = task_thread_info(task);
11039 for (;;) {
11040 char *id;
11041 unsigned long *estack_end;
11042+
11043 estack_end = in_exception_stack(cpu, (unsigned long)stack,
11044 &used, &id);
11045
11046@@ -178,7 +178,7 @@ void dump_trace(struct task_struct *task
11047 if (ops->stack(data, id) < 0)
11048 break;
11049
11050- bp = ops->walk_stack(tinfo, stack, bp, ops,
11051+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
11052 data, estack_end, &graph);
11053 ops->stack(data, "<EOE>");
11054 /*
11055@@ -197,7 +197,7 @@ void dump_trace(struct task_struct *task
11056 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
11057 if (ops->stack(data, "IRQ") < 0)
11058 break;
11059- bp = ops->walk_stack(tinfo, stack, bp,
11060+ bp = ops->walk_stack(task, irq_stack, stack, bp,
11061 ops, data, irq_stack_end, &graph);
11062 /*
11063 * We link to the next stack (which would be
11064@@ -218,7 +218,8 @@ void dump_trace(struct task_struct *task
11065 /*
11066 * This handles the process stack:
11067 */
11068- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
11069+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11070+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11071 put_cpu();
11072 }
11073 EXPORT_SYMBOL(dump_trace);
11074diff -urNp linux-3.0.4/arch/x86/kernel/dumpstack.c linux-3.0.4/arch/x86/kernel/dumpstack.c
11075--- linux-3.0.4/arch/x86/kernel/dumpstack.c 2011-07-21 22:17:23.000000000 -0400
11076+++ linux-3.0.4/arch/x86/kernel/dumpstack.c 2011-08-23 21:48:14.000000000 -0400
11077@@ -2,6 +2,9 @@
11078 * Copyright (C) 1991, 1992 Linus Torvalds
11079 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
11080 */
11081+#ifdef CONFIG_GRKERNSEC_HIDESYM
11082+#define __INCLUDED_BY_HIDESYM 1
11083+#endif
11084 #include <linux/kallsyms.h>
11085 #include <linux/kprobes.h>
11086 #include <linux/uaccess.h>
11087@@ -35,9 +38,8 @@ void printk_address(unsigned long addres
11088 static void
11089 print_ftrace_graph_addr(unsigned long addr, void *data,
11090 const struct stacktrace_ops *ops,
11091- struct thread_info *tinfo, int *graph)
11092+ struct task_struct *task, int *graph)
11093 {
11094- struct task_struct *task = tinfo->task;
11095 unsigned long ret_addr;
11096 int index = task->curr_ret_stack;
11097
11098@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long ad
11099 static inline void
11100 print_ftrace_graph_addr(unsigned long addr, void *data,
11101 const struct stacktrace_ops *ops,
11102- struct thread_info *tinfo, int *graph)
11103+ struct task_struct *task, int *graph)
11104 { }
11105 #endif
11106
11107@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long ad
11108 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
11109 */
11110
11111-static inline int valid_stack_ptr(struct thread_info *tinfo,
11112- void *p, unsigned int size, void *end)
11113+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
11114 {
11115- void *t = tinfo;
11116 if (end) {
11117 if (p < end && p >= (end-THREAD_SIZE))
11118 return 1;
11119@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct
11120 }
11121
11122 unsigned long
11123-print_context_stack(struct thread_info *tinfo,
11124+print_context_stack(struct task_struct *task, void *stack_start,
11125 unsigned long *stack, unsigned long bp,
11126 const struct stacktrace_ops *ops, void *data,
11127 unsigned long *end, int *graph)
11128 {
11129 struct stack_frame *frame = (struct stack_frame *)bp;
11130
11131- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
11132+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
11133 unsigned long addr;
11134
11135 addr = *stack;
11136@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *
11137 } else {
11138 ops->address(data, addr, 0);
11139 }
11140- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11141+ print_ftrace_graph_addr(addr, data, ops, task, graph);
11142 }
11143 stack++;
11144 }
11145@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *
11146 EXPORT_SYMBOL_GPL(print_context_stack);
11147
11148 unsigned long
11149-print_context_stack_bp(struct thread_info *tinfo,
11150+print_context_stack_bp(struct task_struct *task, void *stack_start,
11151 unsigned long *stack, unsigned long bp,
11152 const struct stacktrace_ops *ops, void *data,
11153 unsigned long *end, int *graph)
11154@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_inf
11155 struct stack_frame *frame = (struct stack_frame *)bp;
11156 unsigned long *ret_addr = &frame->return_address;
11157
11158- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
11159+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
11160 unsigned long addr = *ret_addr;
11161
11162 if (!__kernel_text_address(addr))
11163@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_inf
11164 ops->address(data, addr, 1);
11165 frame = frame->next_frame;
11166 ret_addr = &frame->return_address;
11167- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11168+ print_ftrace_graph_addr(addr, data, ops, task, graph);
11169 }
11170
11171 return (unsigned long)frame;
11172@@ -186,7 +186,7 @@ void dump_stack(void)
11173
11174 bp = stack_frame(current, NULL);
11175 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
11176- current->pid, current->comm, print_tainted(),
11177+ task_pid_nr(current), current->comm, print_tainted(),
11178 init_utsname()->release,
11179 (int)strcspn(init_utsname()->version, " "),
11180 init_utsname()->version);
11181@@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
11182 }
11183 EXPORT_SYMBOL_GPL(oops_begin);
11184
11185+extern void gr_handle_kernel_exploit(void);
11186+
11187 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
11188 {
11189 if (regs && kexec_should_crash(current))
11190@@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long fl
11191 panic("Fatal exception in interrupt");
11192 if (panic_on_oops)
11193 panic("Fatal exception");
11194- do_exit(signr);
11195+
11196+ gr_handle_kernel_exploit();
11197+
11198+ do_group_exit(signr);
11199 }
11200
11201 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
11202@@ -269,7 +274,7 @@ int __kprobes __die(const char *str, str
11203
11204 show_registers(regs);
11205 #ifdef CONFIG_X86_32
11206- if (user_mode_vm(regs)) {
11207+ if (user_mode(regs)) {
11208 sp = regs->sp;
11209 ss = regs->ss & 0xffff;
11210 } else {
11211@@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs
11212 unsigned long flags = oops_begin();
11213 int sig = SIGSEGV;
11214
11215- if (!user_mode_vm(regs))
11216+ if (!user_mode(regs))
11217 report_bug(regs->ip, regs);
11218
11219 if (__die(str, regs, err))
11220diff -urNp linux-3.0.4/arch/x86/kernel/early_printk.c linux-3.0.4/arch/x86/kernel/early_printk.c
11221--- linux-3.0.4/arch/x86/kernel/early_printk.c 2011-07-21 22:17:23.000000000 -0400
11222+++ linux-3.0.4/arch/x86/kernel/early_printk.c 2011-08-23 21:48:14.000000000 -0400
11223@@ -7,6 +7,7 @@
11224 #include <linux/pci_regs.h>
11225 #include <linux/pci_ids.h>
11226 #include <linux/errno.h>
11227+#include <linux/sched.h>
11228 #include <asm/io.h>
11229 #include <asm/processor.h>
11230 #include <asm/fcntl.h>
11231@@ -179,6 +180,8 @@ asmlinkage void early_printk(const char
11232 int n;
11233 va_list ap;
11234
11235+ pax_track_stack();
11236+
11237 va_start(ap, fmt);
11238 n = vscnprintf(buf, sizeof(buf), fmt, ap);
11239 early_console->write(early_console, buf, n);
11240diff -urNp linux-3.0.4/arch/x86/kernel/entry_32.S linux-3.0.4/arch/x86/kernel/entry_32.S
11241--- linux-3.0.4/arch/x86/kernel/entry_32.S 2011-07-21 22:17:23.000000000 -0400
11242+++ linux-3.0.4/arch/x86/kernel/entry_32.S 2011-08-30 18:23:52.000000000 -0400
11243@@ -185,13 +185,146 @@
11244 /*CFI_REL_OFFSET gs, PT_GS*/
11245 .endm
11246 .macro SET_KERNEL_GS reg
11247+
11248+#ifdef CONFIG_CC_STACKPROTECTOR
11249 movl $(__KERNEL_STACK_CANARY), \reg
11250+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
11251+ movl $(__USER_DS), \reg
11252+#else
11253+ xorl \reg, \reg
11254+#endif
11255+
11256 movl \reg, %gs
11257 .endm
11258
11259 #endif /* CONFIG_X86_32_LAZY_GS */
11260
11261-.macro SAVE_ALL
11262+.macro pax_enter_kernel
11263+#ifdef CONFIG_PAX_KERNEXEC
11264+ call pax_enter_kernel
11265+#endif
11266+.endm
11267+
11268+.macro pax_exit_kernel
11269+#ifdef CONFIG_PAX_KERNEXEC
11270+ call pax_exit_kernel
11271+#endif
11272+.endm
11273+
11274+#ifdef CONFIG_PAX_KERNEXEC
11275+ENTRY(pax_enter_kernel)
11276+#ifdef CONFIG_PARAVIRT
11277+ pushl %eax
11278+ pushl %ecx
11279+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
11280+ mov %eax, %esi
11281+#else
11282+ mov %cr0, %esi
11283+#endif
11284+ bts $16, %esi
11285+ jnc 1f
11286+ mov %cs, %esi
11287+ cmp $__KERNEL_CS, %esi
11288+ jz 3f
11289+ ljmp $__KERNEL_CS, $3f
11290+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
11291+2:
11292+#ifdef CONFIG_PARAVIRT
11293+ mov %esi, %eax
11294+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11295+#else
11296+ mov %esi, %cr0
11297+#endif
11298+3:
11299+#ifdef CONFIG_PARAVIRT
11300+ popl %ecx
11301+ popl %eax
11302+#endif
11303+ ret
11304+ENDPROC(pax_enter_kernel)
11305+
11306+ENTRY(pax_exit_kernel)
11307+#ifdef CONFIG_PARAVIRT
11308+ pushl %eax
11309+ pushl %ecx
11310+#endif
11311+ mov %cs, %esi
11312+ cmp $__KERNEXEC_KERNEL_CS, %esi
11313+ jnz 2f
11314+#ifdef CONFIG_PARAVIRT
11315+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
11316+ mov %eax, %esi
11317+#else
11318+ mov %cr0, %esi
11319+#endif
11320+ btr $16, %esi
11321+ ljmp $__KERNEL_CS, $1f
11322+1:
11323+#ifdef CONFIG_PARAVIRT
11324+ mov %esi, %eax
11325+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
11326+#else
11327+ mov %esi, %cr0
11328+#endif
11329+2:
11330+#ifdef CONFIG_PARAVIRT
11331+ popl %ecx
11332+ popl %eax
11333+#endif
11334+ ret
11335+ENDPROC(pax_exit_kernel)
11336+#endif
11337+
11338+.macro pax_erase_kstack
11339+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11340+ call pax_erase_kstack
11341+#endif
11342+.endm
11343+
11344+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11345+/*
11346+ * ebp: thread_info
11347+ * ecx, edx: can be clobbered
11348+ */
11349+ENTRY(pax_erase_kstack)
11350+ pushl %edi
11351+ pushl %eax
11352+
11353+ mov TI_lowest_stack(%ebp), %edi
11354+ mov $-0xBEEF, %eax
11355+ std
11356+
11357+1: mov %edi, %ecx
11358+ and $THREAD_SIZE_asm - 1, %ecx
11359+ shr $2, %ecx
11360+ repne scasl
11361+ jecxz 2f
11362+
11363+ cmp $2*16, %ecx
11364+ jc 2f
11365+
11366+ mov $2*16, %ecx
11367+ repe scasl
11368+ jecxz 2f
11369+ jne 1b
11370+
11371+2: cld
11372+ mov %esp, %ecx
11373+ sub %edi, %ecx
11374+ shr $2, %ecx
11375+ rep stosl
11376+
11377+ mov TI_task_thread_sp0(%ebp), %edi
11378+ sub $128, %edi
11379+ mov %edi, TI_lowest_stack(%ebp)
11380+
11381+ popl %eax
11382+ popl %edi
11383+ ret
11384+ENDPROC(pax_erase_kstack)
11385+#endif
11386+
11387+.macro __SAVE_ALL _DS
11388 cld
11389 PUSH_GS
11390 pushl_cfi %fs
11391@@ -214,7 +347,7 @@
11392 CFI_REL_OFFSET ecx, 0
11393 pushl_cfi %ebx
11394 CFI_REL_OFFSET ebx, 0
11395- movl $(__USER_DS), %edx
11396+ movl $\_DS, %edx
11397 movl %edx, %ds
11398 movl %edx, %es
11399 movl $(__KERNEL_PERCPU), %edx
11400@@ -222,6 +355,15 @@
11401 SET_KERNEL_GS %edx
11402 .endm
11403
11404+.macro SAVE_ALL
11405+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
11406+ __SAVE_ALL __KERNEL_DS
11407+ pax_enter_kernel
11408+#else
11409+ __SAVE_ALL __USER_DS
11410+#endif
11411+.endm
11412+
11413 .macro RESTORE_INT_REGS
11414 popl_cfi %ebx
11415 CFI_RESTORE ebx
11416@@ -332,7 +474,15 @@ check_userspace:
11417 movb PT_CS(%esp), %al
11418 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
11419 cmpl $USER_RPL, %eax
11420+
11421+#ifdef CONFIG_PAX_KERNEXEC
11422+ jae resume_userspace
11423+
11424+ PAX_EXIT_KERNEL
11425+ jmp resume_kernel
11426+#else
11427 jb resume_kernel # not returning to v8086 or userspace
11428+#endif
11429
11430 ENTRY(resume_userspace)
11431 LOCKDEP_SYS_EXIT
11432@@ -344,7 +494,7 @@ ENTRY(resume_userspace)
11433 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
11434 # int/exception return?
11435 jne work_pending
11436- jmp restore_all
11437+ jmp restore_all_pax
11438 END(ret_from_exception)
11439
11440 #ifdef CONFIG_PREEMPT
11441@@ -394,23 +544,34 @@ sysenter_past_esp:
11442 /*CFI_REL_OFFSET cs, 0*/
11443 /*
11444 * Push current_thread_info()->sysenter_return to the stack.
11445- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
11446- * pushed above; +8 corresponds to copy_thread's esp0 setting.
11447 */
11448- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
11449+ pushl_cfi $0
11450 CFI_REL_OFFSET eip, 0
11451
11452 pushl_cfi %eax
11453 SAVE_ALL
11454+ GET_THREAD_INFO(%ebp)
11455+ movl TI_sysenter_return(%ebp),%ebp
11456+ movl %ebp,PT_EIP(%esp)
11457 ENABLE_INTERRUPTS(CLBR_NONE)
11458
11459 /*
11460 * Load the potential sixth argument from user stack.
11461 * Careful about security.
11462 */
11463+ movl PT_OLDESP(%esp),%ebp
11464+
11465+#ifdef CONFIG_PAX_MEMORY_UDEREF
11466+ mov PT_OLDSS(%esp),%ds
11467+1: movl %ds:(%ebp),%ebp
11468+ push %ss
11469+ pop %ds
11470+#else
11471 cmpl $__PAGE_OFFSET-3,%ebp
11472 jae syscall_fault
11473 1: movl (%ebp),%ebp
11474+#endif
11475+
11476 movl %ebp,PT_EBP(%esp)
11477 .section __ex_table,"a"
11478 .align 4
11479@@ -433,12 +594,24 @@ sysenter_do_call:
11480 testl $_TIF_ALLWORK_MASK, %ecx
11481 jne sysexit_audit
11482 sysenter_exit:
11483+
11484+#ifdef CONFIG_PAX_RANDKSTACK
11485+ pushl_cfi %eax
11486+ movl %esp, %eax
11487+ call pax_randomize_kstack
11488+ popl_cfi %eax
11489+#endif
11490+
11491+ pax_erase_kstack
11492+
11493 /* if something modifies registers it must also disable sysexit */
11494 movl PT_EIP(%esp), %edx
11495 movl PT_OLDESP(%esp), %ecx
11496 xorl %ebp,%ebp
11497 TRACE_IRQS_ON
11498 1: mov PT_FS(%esp), %fs
11499+2: mov PT_DS(%esp), %ds
11500+3: mov PT_ES(%esp), %es
11501 PTGS_TO_GS
11502 ENABLE_INTERRUPTS_SYSEXIT
11503
11504@@ -455,6 +628,9 @@ sysenter_audit:
11505 movl %eax,%edx /* 2nd arg: syscall number */
11506 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
11507 call audit_syscall_entry
11508+
11509+ pax_erase_kstack
11510+
11511 pushl_cfi %ebx
11512 movl PT_EAX(%esp),%eax /* reload syscall number */
11513 jmp sysenter_do_call
11514@@ -481,11 +657,17 @@ sysexit_audit:
11515
11516 CFI_ENDPROC
11517 .pushsection .fixup,"ax"
11518-2: movl $0,PT_FS(%esp)
11519+4: movl $0,PT_FS(%esp)
11520+ jmp 1b
11521+5: movl $0,PT_DS(%esp)
11522+ jmp 1b
11523+6: movl $0,PT_ES(%esp)
11524 jmp 1b
11525 .section __ex_table,"a"
11526 .align 4
11527- .long 1b,2b
11528+ .long 1b,4b
11529+ .long 2b,5b
11530+ .long 3b,6b
11531 .popsection
11532 PTGS_TO_GS_EX
11533 ENDPROC(ia32_sysenter_target)
11534@@ -518,6 +700,15 @@ syscall_exit:
11535 testl $_TIF_ALLWORK_MASK, %ecx # current->work
11536 jne syscall_exit_work
11537
11538+restore_all_pax:
11539+
11540+#ifdef CONFIG_PAX_RANDKSTACK
11541+ movl %esp, %eax
11542+ call pax_randomize_kstack
11543+#endif
11544+
11545+ pax_erase_kstack
11546+
11547 restore_all:
11548 TRACE_IRQS_IRET
11549 restore_all_notrace:
11550@@ -577,14 +768,34 @@ ldt_ss:
11551 * compensating for the offset by changing to the ESPFIX segment with
11552 * a base address that matches for the difference.
11553 */
11554-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
11555+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
11556 mov %esp, %edx /* load kernel esp */
11557 mov PT_OLDESP(%esp), %eax /* load userspace esp */
11558 mov %dx, %ax /* eax: new kernel esp */
11559 sub %eax, %edx /* offset (low word is 0) */
11560+#ifdef CONFIG_SMP
11561+ movl PER_CPU_VAR(cpu_number), %ebx
11562+ shll $PAGE_SHIFT_asm, %ebx
11563+ addl $cpu_gdt_table, %ebx
11564+#else
11565+ movl $cpu_gdt_table, %ebx
11566+#endif
11567 shr $16, %edx
11568- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
11569- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
11570+
11571+#ifdef CONFIG_PAX_KERNEXEC
11572+ mov %cr0, %esi
11573+ btr $16, %esi
11574+ mov %esi, %cr0
11575+#endif
11576+
11577+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
11578+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
11579+
11580+#ifdef CONFIG_PAX_KERNEXEC
11581+ bts $16, %esi
11582+ mov %esi, %cr0
11583+#endif
11584+
11585 pushl_cfi $__ESPFIX_SS
11586 pushl_cfi %eax /* new kernel esp */
11587 /* Disable interrupts, but do not irqtrace this section: we
11588@@ -613,29 +824,23 @@ work_resched:
11589 movl TI_flags(%ebp), %ecx
11590 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
11591 # than syscall tracing?
11592- jz restore_all
11593+ jz restore_all_pax
11594 testb $_TIF_NEED_RESCHED, %cl
11595 jnz work_resched
11596
11597 work_notifysig: # deal with pending signals and
11598 # notify-resume requests
11599+ movl %esp, %eax
11600 #ifdef CONFIG_VM86
11601 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
11602- movl %esp, %eax
11603- jne work_notifysig_v86 # returning to kernel-space or
11604+ jz 1f # returning to kernel-space or
11605 # vm86-space
11606- xorl %edx, %edx
11607- call do_notify_resume
11608- jmp resume_userspace_sig
11609
11610- ALIGN
11611-work_notifysig_v86:
11612 pushl_cfi %ecx # save ti_flags for do_notify_resume
11613 call save_v86_state # %eax contains pt_regs pointer
11614 popl_cfi %ecx
11615 movl %eax, %esp
11616-#else
11617- movl %esp, %eax
11618+1:
11619 #endif
11620 xorl %edx, %edx
11621 call do_notify_resume
11622@@ -648,6 +853,9 @@ syscall_trace_entry:
11623 movl $-ENOSYS,PT_EAX(%esp)
11624 movl %esp, %eax
11625 call syscall_trace_enter
11626+
11627+ pax_erase_kstack
11628+
11629 /* What it returned is what we'll actually use. */
11630 cmpl $(nr_syscalls), %eax
11631 jnae syscall_call
11632@@ -670,6 +878,10 @@ END(syscall_exit_work)
11633
11634 RING0_INT_FRAME # can't unwind into user space anyway
11635 syscall_fault:
11636+#ifdef CONFIG_PAX_MEMORY_UDEREF
11637+ push %ss
11638+ pop %ds
11639+#endif
11640 GET_THREAD_INFO(%ebp)
11641 movl $-EFAULT,PT_EAX(%esp)
11642 jmp resume_userspace
11643@@ -752,6 +964,36 @@ ptregs_clone:
11644 CFI_ENDPROC
11645 ENDPROC(ptregs_clone)
11646
11647+ ALIGN;
11648+ENTRY(kernel_execve)
11649+ CFI_STARTPROC
11650+ pushl_cfi %ebp
11651+ sub $PT_OLDSS+4,%esp
11652+ pushl_cfi %edi
11653+ pushl_cfi %ecx
11654+ pushl_cfi %eax
11655+ lea 3*4(%esp),%edi
11656+ mov $PT_OLDSS/4+1,%ecx
11657+ xorl %eax,%eax
11658+ rep stosl
11659+ popl_cfi %eax
11660+ popl_cfi %ecx
11661+ popl_cfi %edi
11662+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
11663+ pushl_cfi %esp
11664+ call sys_execve
11665+ add $4,%esp
11666+ CFI_ADJUST_CFA_OFFSET -4
11667+ GET_THREAD_INFO(%ebp)
11668+ test %eax,%eax
11669+ jz syscall_exit
11670+ add $PT_OLDSS+4,%esp
11671+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
11672+ popl_cfi %ebp
11673+ ret
11674+ CFI_ENDPROC
11675+ENDPROC(kernel_execve)
11676+
11677 .macro FIXUP_ESPFIX_STACK
11678 /*
11679 * Switch back for ESPFIX stack to the normal zerobased stack
11680@@ -761,8 +1003,15 @@ ENDPROC(ptregs_clone)
11681 * normal stack and adjusts ESP with the matching offset.
11682 */
11683 /* fixup the stack */
11684- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
11685- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
11686+#ifdef CONFIG_SMP
11687+ movl PER_CPU_VAR(cpu_number), %ebx
11688+ shll $PAGE_SHIFT_asm, %ebx
11689+ addl $cpu_gdt_table, %ebx
11690+#else
11691+ movl $cpu_gdt_table, %ebx
11692+#endif
11693+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
11694+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
11695 shl $16, %eax
11696 addl %esp, %eax /* the adjusted stack pointer */
11697 pushl_cfi $__KERNEL_DS
11698@@ -1213,7 +1462,6 @@ return_to_handler:
11699 jmp *%ecx
11700 #endif
11701
11702-.section .rodata,"a"
11703 #include "syscall_table_32.S"
11704
11705 syscall_table_size=(.-sys_call_table)
11706@@ -1259,9 +1507,12 @@ error_code:
11707 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
11708 REG_TO_PTGS %ecx
11709 SET_KERNEL_GS %ecx
11710- movl $(__USER_DS), %ecx
11711+ movl $(__KERNEL_DS), %ecx
11712 movl %ecx, %ds
11713 movl %ecx, %es
11714+
11715+ pax_enter_kernel
11716+
11717 TRACE_IRQS_OFF
11718 movl %esp,%eax # pt_regs pointer
11719 call *%edi
11720@@ -1346,6 +1597,9 @@ nmi_stack_correct:
11721 xorl %edx,%edx # zero error code
11722 movl %esp,%eax # pt_regs pointer
11723 call do_nmi
11724+
11725+ pax_exit_kernel
11726+
11727 jmp restore_all_notrace
11728 CFI_ENDPROC
11729
11730@@ -1382,6 +1636,9 @@ nmi_espfix_stack:
11731 FIXUP_ESPFIX_STACK # %eax == %esp
11732 xorl %edx,%edx # zero error code
11733 call do_nmi
11734+
11735+ pax_exit_kernel
11736+
11737 RESTORE_REGS
11738 lss 12+4(%esp), %esp # back to espfix stack
11739 CFI_ADJUST_CFA_OFFSET -24
11740diff -urNp linux-3.0.4/arch/x86/kernel/entry_64.S linux-3.0.4/arch/x86/kernel/entry_64.S
11741--- linux-3.0.4/arch/x86/kernel/entry_64.S 2011-07-21 22:17:23.000000000 -0400
11742+++ linux-3.0.4/arch/x86/kernel/entry_64.S 2011-08-26 19:49:56.000000000 -0400
11743@@ -53,6 +53,7 @@
11744 #include <asm/paravirt.h>
11745 #include <asm/ftrace.h>
11746 #include <asm/percpu.h>
11747+#include <asm/pgtable.h>
11748
11749 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11750 #include <linux/elf-em.h>
11751@@ -176,6 +177,264 @@ ENTRY(native_usergs_sysret64)
11752 ENDPROC(native_usergs_sysret64)
11753 #endif /* CONFIG_PARAVIRT */
11754
11755+ .macro ljmpq sel, off
11756+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
11757+ .byte 0x48; ljmp *1234f(%rip)
11758+ .pushsection .rodata
11759+ .align 16
11760+ 1234: .quad \off; .word \sel
11761+ .popsection
11762+#else
11763+ pushq $\sel
11764+ pushq $\off
11765+ lretq
11766+#endif
11767+ .endm
11768+
11769+ .macro pax_enter_kernel
11770+#ifdef CONFIG_PAX_KERNEXEC
11771+ call pax_enter_kernel
11772+#endif
11773+ .endm
11774+
11775+ .macro pax_exit_kernel
11776+#ifdef CONFIG_PAX_KERNEXEC
11777+ call pax_exit_kernel
11778+#endif
11779+ .endm
11780+
11781+#ifdef CONFIG_PAX_KERNEXEC
11782+ENTRY(pax_enter_kernel)
11783+ pushq %rdi
11784+
11785+#ifdef CONFIG_PARAVIRT
11786+ PV_SAVE_REGS(CLBR_RDI)
11787+#endif
11788+
11789+ GET_CR0_INTO_RDI
11790+ bts $16,%rdi
11791+ jnc 1f
11792+ mov %cs,%edi
11793+ cmp $__KERNEL_CS,%edi
11794+ jz 3f
11795+ ljmpq __KERNEL_CS,3f
11796+1: ljmpq __KERNEXEC_KERNEL_CS,2f
11797+2: SET_RDI_INTO_CR0
11798+3:
11799+
11800+#ifdef CONFIG_PARAVIRT
11801+ PV_RESTORE_REGS(CLBR_RDI)
11802+#endif
11803+
11804+ popq %rdi
11805+ retq
11806+ENDPROC(pax_enter_kernel)
11807+
11808+ENTRY(pax_exit_kernel)
11809+ pushq %rdi
11810+
11811+#ifdef CONFIG_PARAVIRT
11812+ PV_SAVE_REGS(CLBR_RDI)
11813+#endif
11814+
11815+ mov %cs,%rdi
11816+ cmp $__KERNEXEC_KERNEL_CS,%edi
11817+ jnz 2f
11818+ GET_CR0_INTO_RDI
11819+ btr $16,%rdi
11820+ ljmpq __KERNEL_CS,1f
11821+1: SET_RDI_INTO_CR0
11822+2:
11823+
11824+#ifdef CONFIG_PARAVIRT
11825+ PV_RESTORE_REGS(CLBR_RDI);
11826+#endif
11827+
11828+ popq %rdi
11829+ retq
11830+ENDPROC(pax_exit_kernel)
11831+#endif
11832+
11833+ .macro pax_enter_kernel_user
11834+#ifdef CONFIG_PAX_MEMORY_UDEREF
11835+ call pax_enter_kernel_user
11836+#endif
11837+ .endm
11838+
11839+ .macro pax_exit_kernel_user
11840+#ifdef CONFIG_PAX_MEMORY_UDEREF
11841+ call pax_exit_kernel_user
11842+#endif
11843+#ifdef CONFIG_PAX_RANDKSTACK
11844+ push %rax
11845+ call pax_randomize_kstack
11846+ pop %rax
11847+#endif
11848+ .endm
11849+
11850+#ifdef CONFIG_PAX_MEMORY_UDEREF
11851+ENTRY(pax_enter_kernel_user)
11852+ pushq %rdi
11853+ pushq %rbx
11854+
11855+#ifdef CONFIG_PARAVIRT
11856+ PV_SAVE_REGS(CLBR_RDI)
11857+#endif
11858+
11859+ GET_CR3_INTO_RDI
11860+ mov %rdi,%rbx
11861+ add $__START_KERNEL_map,%rbx
11862+ sub phys_base(%rip),%rbx
11863+
11864+#ifdef CONFIG_PARAVIRT
11865+ pushq %rdi
11866+ cmpl $0, pv_info+PARAVIRT_enabled
11867+ jz 1f
11868+ i = 0
11869+ .rept USER_PGD_PTRS
11870+ mov i*8(%rbx),%rsi
11871+ mov $0,%sil
11872+ lea i*8(%rbx),%rdi
11873+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
11874+ i = i + 1
11875+ .endr
11876+ jmp 2f
11877+1:
11878+#endif
11879+
11880+ i = 0
11881+ .rept USER_PGD_PTRS
11882+ movb $0,i*8(%rbx)
11883+ i = i + 1
11884+ .endr
11885+
11886+#ifdef CONFIG_PARAVIRT
11887+2: popq %rdi
11888+#endif
11889+ SET_RDI_INTO_CR3
11890+
11891+#ifdef CONFIG_PAX_KERNEXEC
11892+ GET_CR0_INTO_RDI
11893+ bts $16,%rdi
11894+ SET_RDI_INTO_CR0
11895+#endif
11896+
11897+#ifdef CONFIG_PARAVIRT
11898+ PV_RESTORE_REGS(CLBR_RDI)
11899+#endif
11900+
11901+ popq %rbx
11902+ popq %rdi
11903+ retq
11904+ENDPROC(pax_enter_kernel_user)
11905+
11906+ENTRY(pax_exit_kernel_user)
11907+ push %rdi
11908+
11909+#ifdef CONFIG_PARAVIRT
11910+ pushq %rbx
11911+ PV_SAVE_REGS(CLBR_RDI)
11912+#endif
11913+
11914+#ifdef CONFIG_PAX_KERNEXEC
11915+ GET_CR0_INTO_RDI
11916+ btr $16,%rdi
11917+ SET_RDI_INTO_CR0
11918+#endif
11919+
11920+ GET_CR3_INTO_RDI
11921+ add $__START_KERNEL_map,%rdi
11922+ sub phys_base(%rip),%rdi
11923+
11924+#ifdef CONFIG_PARAVIRT
11925+ cmpl $0, pv_info+PARAVIRT_enabled
11926+ jz 1f
11927+ mov %rdi,%rbx
11928+ i = 0
11929+ .rept USER_PGD_PTRS
11930+ mov i*8(%rbx),%rsi
11931+ mov $0x67,%sil
11932+ lea i*8(%rbx),%rdi
11933+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
11934+ i = i + 1
11935+ .endr
11936+ jmp 2f
11937+1:
11938+#endif
11939+
11940+ i = 0
11941+ .rept USER_PGD_PTRS
11942+ movb $0x67,i*8(%rdi)
11943+ i = i + 1
11944+ .endr
11945+
11946+#ifdef CONFIG_PARAVIRT
11947+2: PV_RESTORE_REGS(CLBR_RDI)
11948+ popq %rbx
11949+#endif
11950+
11951+ popq %rdi
11952+ retq
11953+ENDPROC(pax_exit_kernel_user)
11954+#endif
11955+
11956+ .macro pax_erase_kstack
11957+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11958+ call pax_erase_kstack
11959+#endif
11960+ .endm
11961+
11962+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11963+/*
11964+ * r10: thread_info
11965+ * rcx, rdx: can be clobbered
11966+ */
11967+ENTRY(pax_erase_kstack)
11968+ pushq %rdi
11969+ pushq %rax
11970+ pushq %r10
11971+
11972+ GET_THREAD_INFO(%r10)
11973+ mov TI_lowest_stack(%r10), %rdi
11974+ mov $-0xBEEF, %rax
11975+ std
11976+
11977+1: mov %edi, %ecx
11978+ and $THREAD_SIZE_asm - 1, %ecx
11979+ shr $3, %ecx
11980+ repne scasq
11981+ jecxz 2f
11982+
11983+ cmp $2*8, %ecx
11984+ jc 2f
11985+
11986+ mov $2*8, %ecx
11987+ repe scasq
11988+ jecxz 2f
11989+ jne 1b
11990+
11991+2: cld
11992+ mov %esp, %ecx
11993+ sub %edi, %ecx
11994+
11995+ cmp $THREAD_SIZE_asm, %rcx
11996+ jb 3f
11997+ ud2
11998+3:
11999+
12000+ shr $3, %ecx
12001+ rep stosq
12002+
12003+ mov TI_task_thread_sp0(%r10), %rdi
12004+ sub $256, %rdi
12005+ mov %rdi, TI_lowest_stack(%r10)
12006+
12007+ popq %r10
12008+ popq %rax
12009+ popq %rdi
12010+ ret
12011+ENDPROC(pax_erase_kstack)
12012+#endif
12013
12014 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
12015 #ifdef CONFIG_TRACE_IRQFLAGS
12016@@ -318,7 +577,7 @@ ENTRY(save_args)
12017 leaq -RBP+8(%rsp),%rdi /* arg1 for handler */
12018 movq_cfi rbp, 8 /* push %rbp */
12019 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
12020- testl $3, CS(%rdi)
12021+ testb $3, CS(%rdi)
12022 je 1f
12023 SWAPGS
12024 /*
12025@@ -409,7 +668,7 @@ ENTRY(ret_from_fork)
12026
12027 RESTORE_REST
12028
12029- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12030+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12031 je int_ret_from_sys_call
12032
12033 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
12034@@ -455,7 +714,7 @@ END(ret_from_fork)
12035 ENTRY(system_call)
12036 CFI_STARTPROC simple
12037 CFI_SIGNAL_FRAME
12038- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
12039+ CFI_DEF_CFA rsp,0
12040 CFI_REGISTER rip,rcx
12041 /*CFI_REGISTER rflags,r11*/
12042 SWAPGS_UNSAFE_STACK
12043@@ -468,12 +727,13 @@ ENTRY(system_call_after_swapgs)
12044
12045 movq %rsp,PER_CPU_VAR(old_rsp)
12046 movq PER_CPU_VAR(kernel_stack),%rsp
12047+ pax_enter_kernel_user
12048 /*
12049 * No need to follow this irqs off/on section - it's straight
12050 * and short:
12051 */
12052 ENABLE_INTERRUPTS(CLBR_NONE)
12053- SAVE_ARGS 8,1
12054+ SAVE_ARGS 8*6,1
12055 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
12056 movq %rcx,RIP-ARGOFFSET(%rsp)
12057 CFI_REL_OFFSET rip,RIP-ARGOFFSET
12058@@ -502,6 +762,8 @@ sysret_check:
12059 andl %edi,%edx
12060 jnz sysret_careful
12061 CFI_REMEMBER_STATE
12062+ pax_exit_kernel_user
12063+ pax_erase_kstack
12064 /*
12065 * sysretq will re-enable interrupts:
12066 */
12067@@ -560,6 +822,9 @@ auditsys:
12068 movq %rax,%rsi /* 2nd arg: syscall number */
12069 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
12070 call audit_syscall_entry
12071+
12072+ pax_erase_kstack
12073+
12074 LOAD_ARGS 0 /* reload call-clobbered registers */
12075 jmp system_call_fastpath
12076
12077@@ -590,6 +855,9 @@ tracesys:
12078 FIXUP_TOP_OF_STACK %rdi
12079 movq %rsp,%rdi
12080 call syscall_trace_enter
12081+
12082+ pax_erase_kstack
12083+
12084 /*
12085 * Reload arg registers from stack in case ptrace changed them.
12086 * We don't reload %rax because syscall_trace_enter() returned
12087@@ -611,7 +879,7 @@ tracesys:
12088 GLOBAL(int_ret_from_sys_call)
12089 DISABLE_INTERRUPTS(CLBR_NONE)
12090 TRACE_IRQS_OFF
12091- testl $3,CS-ARGOFFSET(%rsp)
12092+ testb $3,CS-ARGOFFSET(%rsp)
12093 je retint_restore_args
12094 movl $_TIF_ALLWORK_MASK,%edi
12095 /* edi: mask to check */
12096@@ -793,6 +1061,16 @@ END(interrupt)
12097 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
12098 call save_args
12099 PARTIAL_FRAME 0
12100+#ifdef CONFIG_PAX_MEMORY_UDEREF
12101+ testb $3, CS(%rdi)
12102+ jnz 1f
12103+ pax_enter_kernel
12104+ jmp 2f
12105+1: pax_enter_kernel_user
12106+2:
12107+#else
12108+ pax_enter_kernel
12109+#endif
12110 call \func
12111 .endm
12112
12113@@ -825,7 +1103,7 @@ ret_from_intr:
12114 CFI_ADJUST_CFA_OFFSET -8
12115 exit_intr:
12116 GET_THREAD_INFO(%rcx)
12117- testl $3,CS-ARGOFFSET(%rsp)
12118+ testb $3,CS-ARGOFFSET(%rsp)
12119 je retint_kernel
12120
12121 /* Interrupt came from user space */
12122@@ -847,12 +1125,15 @@ retint_swapgs: /* return to user-space
12123 * The iretq could re-enable interrupts:
12124 */
12125 DISABLE_INTERRUPTS(CLBR_ANY)
12126+ pax_exit_kernel_user
12127+ pax_erase_kstack
12128 TRACE_IRQS_IRETQ
12129 SWAPGS
12130 jmp restore_args
12131
12132 retint_restore_args: /* return to kernel space */
12133 DISABLE_INTERRUPTS(CLBR_ANY)
12134+ pax_exit_kernel
12135 /*
12136 * The iretq could re-enable interrupts:
12137 */
12138@@ -1027,6 +1308,16 @@ ENTRY(\sym)
12139 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12140 call error_entry
12141 DEFAULT_FRAME 0
12142+#ifdef CONFIG_PAX_MEMORY_UDEREF
12143+ testb $3, CS(%rsp)
12144+ jnz 1f
12145+ pax_enter_kernel
12146+ jmp 2f
12147+1: pax_enter_kernel_user
12148+2:
12149+#else
12150+ pax_enter_kernel
12151+#endif
12152 movq %rsp,%rdi /* pt_regs pointer */
12153 xorl %esi,%esi /* no error code */
12154 call \do_sym
12155@@ -1044,6 +1335,16 @@ ENTRY(\sym)
12156 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12157 call save_paranoid
12158 TRACE_IRQS_OFF
12159+#ifdef CONFIG_PAX_MEMORY_UDEREF
12160+ testb $3, CS(%rsp)
12161+ jnz 1f
12162+ pax_enter_kernel
12163+ jmp 2f
12164+1: pax_enter_kernel_user
12165+2:
12166+#else
12167+ pax_enter_kernel
12168+#endif
12169 movq %rsp,%rdi /* pt_regs pointer */
12170 xorl %esi,%esi /* no error code */
12171 call \do_sym
12172@@ -1052,7 +1353,7 @@ ENTRY(\sym)
12173 END(\sym)
12174 .endm
12175
12176-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
12177+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
12178 .macro paranoidzeroentry_ist sym do_sym ist
12179 ENTRY(\sym)
12180 INTR_FRAME
12181@@ -1062,8 +1363,24 @@ ENTRY(\sym)
12182 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12183 call save_paranoid
12184 TRACE_IRQS_OFF
12185+#ifdef CONFIG_PAX_MEMORY_UDEREF
12186+ testb $3, CS(%rsp)
12187+ jnz 1f
12188+ pax_enter_kernel
12189+ jmp 2f
12190+1: pax_enter_kernel_user
12191+2:
12192+#else
12193+ pax_enter_kernel
12194+#endif
12195 movq %rsp,%rdi /* pt_regs pointer */
12196 xorl %esi,%esi /* no error code */
12197+#ifdef CONFIG_SMP
12198+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
12199+ lea init_tss(%r12), %r12
12200+#else
12201+ lea init_tss(%rip), %r12
12202+#endif
12203 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12204 call \do_sym
12205 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12206@@ -1080,6 +1397,16 @@ ENTRY(\sym)
12207 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12208 call error_entry
12209 DEFAULT_FRAME 0
12210+#ifdef CONFIG_PAX_MEMORY_UDEREF
12211+ testb $3, CS(%rsp)
12212+ jnz 1f
12213+ pax_enter_kernel
12214+ jmp 2f
12215+1: pax_enter_kernel_user
12216+2:
12217+#else
12218+ pax_enter_kernel
12219+#endif
12220 movq %rsp,%rdi /* pt_regs pointer */
12221 movq ORIG_RAX(%rsp),%rsi /* get error code */
12222 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12223@@ -1099,6 +1426,16 @@ ENTRY(\sym)
12224 call save_paranoid
12225 DEFAULT_FRAME 0
12226 TRACE_IRQS_OFF
12227+#ifdef CONFIG_PAX_MEMORY_UDEREF
12228+ testb $3, CS(%rsp)
12229+ jnz 1f
12230+ pax_enter_kernel
12231+ jmp 2f
12232+1: pax_enter_kernel_user
12233+2:
12234+#else
12235+ pax_enter_kernel
12236+#endif
12237 movq %rsp,%rdi /* pt_regs pointer */
12238 movq ORIG_RAX(%rsp),%rsi /* get error code */
12239 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12240@@ -1361,14 +1698,27 @@ ENTRY(paranoid_exit)
12241 TRACE_IRQS_OFF
12242 testl %ebx,%ebx /* swapgs needed? */
12243 jnz paranoid_restore
12244- testl $3,CS(%rsp)
12245+ testb $3,CS(%rsp)
12246 jnz paranoid_userspace
12247+#ifdef CONFIG_PAX_MEMORY_UDEREF
12248+ pax_exit_kernel
12249+ TRACE_IRQS_IRETQ 0
12250+ SWAPGS_UNSAFE_STACK
12251+ RESTORE_ALL 8
12252+ jmp irq_return
12253+#endif
12254 paranoid_swapgs:
12255+#ifdef CONFIG_PAX_MEMORY_UDEREF
12256+ pax_exit_kernel_user
12257+#else
12258+ pax_exit_kernel
12259+#endif
12260 TRACE_IRQS_IRETQ 0
12261 SWAPGS_UNSAFE_STACK
12262 RESTORE_ALL 8
12263 jmp irq_return
12264 paranoid_restore:
12265+ pax_exit_kernel
12266 TRACE_IRQS_IRETQ 0
12267 RESTORE_ALL 8
12268 jmp irq_return
12269@@ -1426,7 +1776,7 @@ ENTRY(error_entry)
12270 movq_cfi r14, R14+8
12271 movq_cfi r15, R15+8
12272 xorl %ebx,%ebx
12273- testl $3,CS+8(%rsp)
12274+ testb $3,CS+8(%rsp)
12275 je error_kernelspace
12276 error_swapgs:
12277 SWAPGS
12278@@ -1490,6 +1840,16 @@ ENTRY(nmi)
12279 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12280 call save_paranoid
12281 DEFAULT_FRAME 0
12282+#ifdef CONFIG_PAX_MEMORY_UDEREF
12283+ testb $3, CS(%rsp)
12284+ jnz 1f
12285+ pax_enter_kernel
12286+ jmp 2f
12287+1: pax_enter_kernel_user
12288+2:
12289+#else
12290+ pax_enter_kernel
12291+#endif
12292 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
12293 movq %rsp,%rdi
12294 movq $-1,%rsi
12295@@ -1500,11 +1860,25 @@ ENTRY(nmi)
12296 DISABLE_INTERRUPTS(CLBR_NONE)
12297 testl %ebx,%ebx /* swapgs needed? */
12298 jnz nmi_restore
12299- testl $3,CS(%rsp)
12300+ testb $3,CS(%rsp)
12301 jnz nmi_userspace
12302+#ifdef CONFIG_PAX_MEMORY_UDEREF
12303+ pax_exit_kernel
12304+ SWAPGS_UNSAFE_STACK
12305+ RESTORE_ALL 8
12306+ jmp irq_return
12307+#endif
12308 nmi_swapgs:
12309+#ifdef CONFIG_PAX_MEMORY_UDEREF
12310+ pax_exit_kernel_user
12311+#else
12312+ pax_exit_kernel
12313+#endif
12314 SWAPGS_UNSAFE_STACK
12315+ RESTORE_ALL 8
12316+ jmp irq_return
12317 nmi_restore:
12318+ pax_exit_kernel
12319 RESTORE_ALL 8
12320 jmp irq_return
12321 nmi_userspace:
12322diff -urNp linux-3.0.4/arch/x86/kernel/ftrace.c linux-3.0.4/arch/x86/kernel/ftrace.c
12323--- linux-3.0.4/arch/x86/kernel/ftrace.c 2011-07-21 22:17:23.000000000 -0400
12324+++ linux-3.0.4/arch/x86/kernel/ftrace.c 2011-08-23 21:47:55.000000000 -0400
12325@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the
12326 static const void *mod_code_newcode; /* holds the text to write to the IP */
12327
12328 static unsigned nmi_wait_count;
12329-static atomic_t nmi_update_count = ATOMIC_INIT(0);
12330+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
12331
12332 int ftrace_arch_read_dyn_info(char *buf, int size)
12333 {
12334@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf,
12335
12336 r = snprintf(buf, size, "%u %u",
12337 nmi_wait_count,
12338- atomic_read(&nmi_update_count));
12339+ atomic_read_unchecked(&nmi_update_count));
12340 return r;
12341 }
12342
12343@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
12344
12345 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
12346 smp_rmb();
12347+ pax_open_kernel();
12348 ftrace_mod_code();
12349- atomic_inc(&nmi_update_count);
12350+ pax_close_kernel();
12351+ atomic_inc_unchecked(&nmi_update_count);
12352 }
12353 /* Must have previous changes seen before executions */
12354 smp_mb();
12355@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, uns
12356 {
12357 unsigned char replaced[MCOUNT_INSN_SIZE];
12358
12359+ ip = ktla_ktva(ip);
12360+
12361 /*
12362 * Note: Due to modules and __init, code can
12363 * disappear and change, we need to protect against faulting
12364@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_fun
12365 unsigned char old[MCOUNT_INSN_SIZE], *new;
12366 int ret;
12367
12368- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
12369+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
12370 new = ftrace_call_replace(ip, (unsigned long)func);
12371 ret = ftrace_modify_code(ip, old, new);
12372
12373@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long
12374 {
12375 unsigned char code[MCOUNT_INSN_SIZE];
12376
12377+ ip = ktla_ktva(ip);
12378+
12379 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
12380 return -EFAULT;
12381
12382diff -urNp linux-3.0.4/arch/x86/kernel/head32.c linux-3.0.4/arch/x86/kernel/head32.c
12383--- linux-3.0.4/arch/x86/kernel/head32.c 2011-07-21 22:17:23.000000000 -0400
12384+++ linux-3.0.4/arch/x86/kernel/head32.c 2011-08-23 21:47:55.000000000 -0400
12385@@ -19,6 +19,7 @@
12386 #include <asm/io_apic.h>
12387 #include <asm/bios_ebda.h>
12388 #include <asm/tlbflush.h>
12389+#include <asm/boot.h>
12390
12391 static void __init i386_default_early_setup(void)
12392 {
12393@@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
12394 {
12395 memblock_init();
12396
12397- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12398+ memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12399
12400 #ifdef CONFIG_BLK_DEV_INITRD
12401 /* Reserve INITRD */
12402diff -urNp linux-3.0.4/arch/x86/kernel/head_32.S linux-3.0.4/arch/x86/kernel/head_32.S
12403--- linux-3.0.4/arch/x86/kernel/head_32.S 2011-07-21 22:17:23.000000000 -0400
12404+++ linux-3.0.4/arch/x86/kernel/head_32.S 2011-08-23 21:47:55.000000000 -0400
12405@@ -25,6 +25,12 @@
12406 /* Physical address */
12407 #define pa(X) ((X) - __PAGE_OFFSET)
12408
12409+#ifdef CONFIG_PAX_KERNEXEC
12410+#define ta(X) (X)
12411+#else
12412+#define ta(X) ((X) - __PAGE_OFFSET)
12413+#endif
12414+
12415 /*
12416 * References to members of the new_cpu_data structure.
12417 */
12418@@ -54,11 +60,7 @@
12419 * and small than max_low_pfn, otherwise will waste some page table entries
12420 */
12421
12422-#if PTRS_PER_PMD > 1
12423-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
12424-#else
12425-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
12426-#endif
12427+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
12428
12429 /* Number of possible pages in the lowmem region */
12430 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
12431@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
12432 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12433
12434 /*
12435+ * Real beginning of normal "text" segment
12436+ */
12437+ENTRY(stext)
12438+ENTRY(_stext)
12439+
12440+/*
12441 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
12442 * %esi points to the real-mode code as a 32-bit pointer.
12443 * CS and DS must be 4 GB flat segments, but we don't depend on
12444@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12445 * can.
12446 */
12447 __HEAD
12448+
12449+#ifdef CONFIG_PAX_KERNEXEC
12450+ jmp startup_32
12451+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
12452+.fill PAGE_SIZE-5,1,0xcc
12453+#endif
12454+
12455 ENTRY(startup_32)
12456 movl pa(stack_start),%ecx
12457
12458@@ -105,6 +120,57 @@ ENTRY(startup_32)
12459 2:
12460 leal -__PAGE_OFFSET(%ecx),%esp
12461
12462+#ifdef CONFIG_SMP
12463+ movl $pa(cpu_gdt_table),%edi
12464+ movl $__per_cpu_load,%eax
12465+ movw %ax,__KERNEL_PERCPU + 2(%edi)
12466+ rorl $16,%eax
12467+ movb %al,__KERNEL_PERCPU + 4(%edi)
12468+ movb %ah,__KERNEL_PERCPU + 7(%edi)
12469+ movl $__per_cpu_end - 1,%eax
12470+ subl $__per_cpu_start,%eax
12471+ movw %ax,__KERNEL_PERCPU + 0(%edi)
12472+#endif
12473+
12474+#ifdef CONFIG_PAX_MEMORY_UDEREF
12475+ movl $NR_CPUS,%ecx
12476+ movl $pa(cpu_gdt_table),%edi
12477+1:
12478+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
12479+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
12480+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
12481+ addl $PAGE_SIZE_asm,%edi
12482+ loop 1b
12483+#endif
12484+
12485+#ifdef CONFIG_PAX_KERNEXEC
12486+ movl $pa(boot_gdt),%edi
12487+ movl $__LOAD_PHYSICAL_ADDR,%eax
12488+ movw %ax,__BOOT_CS + 2(%edi)
12489+ rorl $16,%eax
12490+ movb %al,__BOOT_CS + 4(%edi)
12491+ movb %ah,__BOOT_CS + 7(%edi)
12492+ rorl $16,%eax
12493+
12494+ ljmp $(__BOOT_CS),$1f
12495+1:
12496+
12497+ movl $NR_CPUS,%ecx
12498+ movl $pa(cpu_gdt_table),%edi
12499+ addl $__PAGE_OFFSET,%eax
12500+1:
12501+ movw %ax,__KERNEL_CS + 2(%edi)
12502+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
12503+ rorl $16,%eax
12504+ movb %al,__KERNEL_CS + 4(%edi)
12505+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
12506+ movb %ah,__KERNEL_CS + 7(%edi)
12507+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
12508+ rorl $16,%eax
12509+ addl $PAGE_SIZE_asm,%edi
12510+ loop 1b
12511+#endif
12512+
12513 /*
12514 * Clear BSS first so that there are no surprises...
12515 */
12516@@ -195,8 +261,11 @@ ENTRY(startup_32)
12517 movl %eax, pa(max_pfn_mapped)
12518
12519 /* Do early initialization of the fixmap area */
12520- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12521- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
12522+#ifdef CONFIG_COMPAT_VDSO
12523+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
12524+#else
12525+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
12526+#endif
12527 #else /* Not PAE */
12528
12529 page_pde_offset = (__PAGE_OFFSET >> 20);
12530@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12531 movl %eax, pa(max_pfn_mapped)
12532
12533 /* Do early initialization of the fixmap area */
12534- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12535- movl %eax,pa(initial_page_table+0xffc)
12536+#ifdef CONFIG_COMPAT_VDSO
12537+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
12538+#else
12539+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
12540+#endif
12541 #endif
12542
12543 #ifdef CONFIG_PARAVIRT
12544@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12545 cmpl $num_subarch_entries, %eax
12546 jae bad_subarch
12547
12548- movl pa(subarch_entries)(,%eax,4), %eax
12549- subl $__PAGE_OFFSET, %eax
12550- jmp *%eax
12551+ jmp *pa(subarch_entries)(,%eax,4)
12552
12553 bad_subarch:
12554 WEAK(lguest_entry)
12555@@ -255,10 +325,10 @@ WEAK(xen_entry)
12556 __INITDATA
12557
12558 subarch_entries:
12559- .long default_entry /* normal x86/PC */
12560- .long lguest_entry /* lguest hypervisor */
12561- .long xen_entry /* Xen hypervisor */
12562- .long default_entry /* Moorestown MID */
12563+ .long ta(default_entry) /* normal x86/PC */
12564+ .long ta(lguest_entry) /* lguest hypervisor */
12565+ .long ta(xen_entry) /* Xen hypervisor */
12566+ .long ta(default_entry) /* Moorestown MID */
12567 num_subarch_entries = (. - subarch_entries) / 4
12568 .previous
12569 #else
12570@@ -312,6 +382,7 @@ default_entry:
12571 orl %edx,%eax
12572 movl %eax,%cr4
12573
12574+#ifdef CONFIG_X86_PAE
12575 testb $X86_CR4_PAE, %al # check if PAE is enabled
12576 jz 6f
12577
12578@@ -340,6 +411,9 @@ default_entry:
12579 /* Make changes effective */
12580 wrmsr
12581
12582+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
12583+#endif
12584+
12585 6:
12586
12587 /*
12588@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
12589 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
12590 movl %eax,%ss # after changing gdt.
12591
12592- movl $(__USER_DS),%eax # DS/ES contains default USER segment
12593+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
12594 movl %eax,%ds
12595 movl %eax,%es
12596
12597@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
12598 */
12599 cmpb $0,ready
12600 jne 1f
12601- movl $gdt_page,%eax
12602+ movl $cpu_gdt_table,%eax
12603 movl $stack_canary,%ecx
12604+#ifdef CONFIG_SMP
12605+ addl $__per_cpu_load,%ecx
12606+#endif
12607 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
12608 shrl $16, %ecx
12609 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
12610 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
12611 1:
12612-#endif
12613 movl $(__KERNEL_STACK_CANARY),%eax
12614+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12615+ movl $(__USER_DS),%eax
12616+#else
12617+ xorl %eax,%eax
12618+#endif
12619 movl %eax,%gs
12620
12621 xorl %eax,%eax # Clear LDT
12622@@ -558,22 +639,22 @@ early_page_fault:
12623 jmp early_fault
12624
12625 early_fault:
12626- cld
12627 #ifdef CONFIG_PRINTK
12628+ cmpl $1,%ss:early_recursion_flag
12629+ je hlt_loop
12630+ incl %ss:early_recursion_flag
12631+ cld
12632 pusha
12633 movl $(__KERNEL_DS),%eax
12634 movl %eax,%ds
12635 movl %eax,%es
12636- cmpl $2,early_recursion_flag
12637- je hlt_loop
12638- incl early_recursion_flag
12639 movl %cr2,%eax
12640 pushl %eax
12641 pushl %edx /* trapno */
12642 pushl $fault_msg
12643 call printk
12644+; call dump_stack
12645 #endif
12646- call dump_stack
12647 hlt_loop:
12648 hlt
12649 jmp hlt_loop
12650@@ -581,8 +662,11 @@ hlt_loop:
12651 /* This is the default interrupt "handler" :-) */
12652 ALIGN
12653 ignore_int:
12654- cld
12655 #ifdef CONFIG_PRINTK
12656+ cmpl $2,%ss:early_recursion_flag
12657+ je hlt_loop
12658+ incl %ss:early_recursion_flag
12659+ cld
12660 pushl %eax
12661 pushl %ecx
12662 pushl %edx
12663@@ -591,9 +675,6 @@ ignore_int:
12664 movl $(__KERNEL_DS),%eax
12665 movl %eax,%ds
12666 movl %eax,%es
12667- cmpl $2,early_recursion_flag
12668- je hlt_loop
12669- incl early_recursion_flag
12670 pushl 16(%esp)
12671 pushl 24(%esp)
12672 pushl 32(%esp)
12673@@ -622,29 +703,43 @@ ENTRY(initial_code)
12674 /*
12675 * BSS section
12676 */
12677-__PAGE_ALIGNED_BSS
12678- .align PAGE_SIZE
12679 #ifdef CONFIG_X86_PAE
12680+.section .initial_pg_pmd,"a",@progbits
12681 initial_pg_pmd:
12682 .fill 1024*KPMDS,4,0
12683 #else
12684+.section .initial_page_table,"a",@progbits
12685 ENTRY(initial_page_table)
12686 .fill 1024,4,0
12687 #endif
12688+.section .initial_pg_fixmap,"a",@progbits
12689 initial_pg_fixmap:
12690 .fill 1024,4,0
12691+.section .empty_zero_page,"a",@progbits
12692 ENTRY(empty_zero_page)
12693 .fill 4096,1,0
12694+.section .swapper_pg_dir,"a",@progbits
12695 ENTRY(swapper_pg_dir)
12696+#ifdef CONFIG_X86_PAE
12697+ .fill 4,8,0
12698+#else
12699 .fill 1024,4,0
12700+#endif
12701+
12702+/*
12703+ * The IDT has to be page-aligned to simplify the Pentium
12704+ * F0 0F bug workaround.. We have a special link segment
12705+ * for this.
12706+ */
12707+.section .idt,"a",@progbits
12708+ENTRY(idt_table)
12709+ .fill 256,8,0
12710
12711 /*
12712 * This starts the data section.
12713 */
12714 #ifdef CONFIG_X86_PAE
12715-__PAGE_ALIGNED_DATA
12716- /* Page-aligned for the benefit of paravirt? */
12717- .align PAGE_SIZE
12718+.section .initial_page_table,"a",@progbits
12719 ENTRY(initial_page_table)
12720 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
12721 # if KPMDS == 3
12722@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
12723 # error "Kernel PMDs should be 1, 2 or 3"
12724 # endif
12725 .align PAGE_SIZE /* needs to be page-sized too */
12726+
12727+#ifdef CONFIG_PAX_PER_CPU_PGD
12728+ENTRY(cpu_pgd)
12729+ .rept NR_CPUS
12730+ .fill 4,8,0
12731+ .endr
12732+#endif
12733+
12734 #endif
12735
12736 .data
12737 .balign 4
12738 ENTRY(stack_start)
12739- .long init_thread_union+THREAD_SIZE
12740+ .long init_thread_union+THREAD_SIZE-8
12741+
12742+ready: .byte 0
12743
12744+.section .rodata,"a",@progbits
12745 early_recursion_flag:
12746 .long 0
12747
12748-ready: .byte 0
12749-
12750 int_msg:
12751 .asciz "Unknown interrupt or fault at: %p %p %p\n"
12752
12753@@ -707,7 +811,7 @@ fault_msg:
12754 .word 0 # 32 bit align gdt_desc.address
12755 boot_gdt_descr:
12756 .word __BOOT_DS+7
12757- .long boot_gdt - __PAGE_OFFSET
12758+ .long pa(boot_gdt)
12759
12760 .word 0 # 32-bit align idt_desc.address
12761 idt_descr:
12762@@ -718,7 +822,7 @@ idt_descr:
12763 .word 0 # 32 bit align gdt_desc.address
12764 ENTRY(early_gdt_descr)
12765 .word GDT_ENTRIES*8-1
12766- .long gdt_page /* Overwritten for secondary CPUs */
12767+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
12768
12769 /*
12770 * The boot_gdt must mirror the equivalent in setup.S and is
12771@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
12772 .align L1_CACHE_BYTES
12773 ENTRY(boot_gdt)
12774 .fill GDT_ENTRY_BOOT_CS,8,0
12775- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
12776- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
12777+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
12778+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
12779+
12780+ .align PAGE_SIZE_asm
12781+ENTRY(cpu_gdt_table)
12782+ .rept NR_CPUS
12783+ .quad 0x0000000000000000 /* NULL descriptor */
12784+ .quad 0x0000000000000000 /* 0x0b reserved */
12785+ .quad 0x0000000000000000 /* 0x13 reserved */
12786+ .quad 0x0000000000000000 /* 0x1b reserved */
12787+
12788+#ifdef CONFIG_PAX_KERNEXEC
12789+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
12790+#else
12791+ .quad 0x0000000000000000 /* 0x20 unused */
12792+#endif
12793+
12794+ .quad 0x0000000000000000 /* 0x28 unused */
12795+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
12796+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
12797+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
12798+ .quad 0x0000000000000000 /* 0x4b reserved */
12799+ .quad 0x0000000000000000 /* 0x53 reserved */
12800+ .quad 0x0000000000000000 /* 0x5b reserved */
12801+
12802+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
12803+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
12804+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
12805+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
12806+
12807+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
12808+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
12809+
12810+ /*
12811+ * Segments used for calling PnP BIOS have byte granularity.
12812+ * The code segments and data segments have fixed 64k limits,
12813+ * the transfer segment sizes are set at run time.
12814+ */
12815+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
12816+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
12817+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
12818+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
12819+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
12820+
12821+ /*
12822+ * The APM segments have byte granularity and their bases
12823+ * are set at run time. All have 64k limits.
12824+ */
12825+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
12826+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
12827+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
12828+
12829+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
12830+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
12831+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
12832+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
12833+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
12834+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
12835+
12836+ /* Be sure this is zeroed to avoid false validations in Xen */
12837+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
12838+ .endr
12839diff -urNp linux-3.0.4/arch/x86/kernel/head_64.S linux-3.0.4/arch/x86/kernel/head_64.S
12840--- linux-3.0.4/arch/x86/kernel/head_64.S 2011-07-21 22:17:23.000000000 -0400
12841+++ linux-3.0.4/arch/x86/kernel/head_64.S 2011-08-23 21:47:55.000000000 -0400
12842@@ -19,6 +19,7 @@
12843 #include <asm/cache.h>
12844 #include <asm/processor-flags.h>
12845 #include <asm/percpu.h>
12846+#include <asm/cpufeature.h>
12847
12848 #ifdef CONFIG_PARAVIRT
12849 #include <asm/asm-offsets.h>
12850@@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
12851 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
12852 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
12853 L3_START_KERNEL = pud_index(__START_KERNEL_map)
12854+L4_VMALLOC_START = pgd_index(VMALLOC_START)
12855+L3_VMALLOC_START = pud_index(VMALLOC_START)
12856+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
12857+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
12858
12859 .text
12860 __HEAD
12861@@ -85,35 +90,22 @@ startup_64:
12862 */
12863 addq %rbp, init_level4_pgt + 0(%rip)
12864 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
12865+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
12866+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
12867 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
12868
12869 addq %rbp, level3_ident_pgt + 0(%rip)
12870+#ifndef CONFIG_XEN
12871+ addq %rbp, level3_ident_pgt + 8(%rip)
12872+#endif
12873
12874- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
12875- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
12876+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
12877
12878- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12879+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
12880+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
12881
12882- /* Add an Identity mapping if I am above 1G */
12883- leaq _text(%rip), %rdi
12884- andq $PMD_PAGE_MASK, %rdi
12885-
12886- movq %rdi, %rax
12887- shrq $PUD_SHIFT, %rax
12888- andq $(PTRS_PER_PUD - 1), %rax
12889- jz ident_complete
12890-
12891- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
12892- leaq level3_ident_pgt(%rip), %rbx
12893- movq %rdx, 0(%rbx, %rax, 8)
12894-
12895- movq %rdi, %rax
12896- shrq $PMD_SHIFT, %rax
12897- andq $(PTRS_PER_PMD - 1), %rax
12898- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
12899- leaq level2_spare_pgt(%rip), %rbx
12900- movq %rdx, 0(%rbx, %rax, 8)
12901-ident_complete:
12902+ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12903+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
12904
12905 /*
12906 * Fixup the kernel text+data virtual addresses. Note that
12907@@ -160,8 +152,8 @@ ENTRY(secondary_startup_64)
12908 * after the boot processor executes this code.
12909 */
12910
12911- /* Enable PAE mode and PGE */
12912- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
12913+ /* Enable PAE mode and PSE/PGE */
12914+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
12915 movq %rax, %cr4
12916
12917 /* Setup early boot stage 4 level pagetables. */
12918@@ -183,9 +175,14 @@ ENTRY(secondary_startup_64)
12919 movl $MSR_EFER, %ecx
12920 rdmsr
12921 btsl $_EFER_SCE, %eax /* Enable System Call */
12922- btl $20,%edi /* No Execute supported? */
12923+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
12924 jnc 1f
12925 btsl $_EFER_NX, %eax
12926+ leaq init_level4_pgt(%rip), %rdi
12927+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
12928+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
12929+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
12930+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
12931 1: wrmsr /* Make changes effective */
12932
12933 /* Setup cr0 */
12934@@ -269,7 +266,7 @@ ENTRY(secondary_startup_64)
12935 bad_address:
12936 jmp bad_address
12937
12938- .section ".init.text","ax"
12939+ __INIT
12940 #ifdef CONFIG_EARLY_PRINTK
12941 .globl early_idt_handlers
12942 early_idt_handlers:
12943@@ -314,18 +311,23 @@ ENTRY(early_idt_handler)
12944 #endif /* EARLY_PRINTK */
12945 1: hlt
12946 jmp 1b
12947+ .previous
12948
12949 #ifdef CONFIG_EARLY_PRINTK
12950+ __INITDATA
12951 early_recursion_flag:
12952 .long 0
12953+ .previous
12954
12955+ .section .rodata,"a",@progbits
12956 early_idt_msg:
12957 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
12958 early_idt_ripmsg:
12959 .asciz "RIP %s\n"
12960-#endif /* CONFIG_EARLY_PRINTK */
12961 .previous
12962+#endif /* CONFIG_EARLY_PRINTK */
12963
12964+ .section .rodata,"a",@progbits
12965 #define NEXT_PAGE(name) \
12966 .balign PAGE_SIZE; \
12967 ENTRY(name)
12968@@ -338,7 +340,6 @@ ENTRY(name)
12969 i = i + 1 ; \
12970 .endr
12971
12972- .data
12973 /*
12974 * This default setting generates an ident mapping at address 0x100000
12975 * and a mapping for the kernel that precisely maps virtual address
12976@@ -349,13 +350,36 @@ NEXT_PAGE(init_level4_pgt)
12977 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12978 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
12979 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12980+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
12981+ .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
12982+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
12983+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
12984 .org init_level4_pgt + L4_START_KERNEL*8, 0
12985 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
12986 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
12987
12988+#ifdef CONFIG_PAX_PER_CPU_PGD
12989+NEXT_PAGE(cpu_pgd)
12990+ .rept NR_CPUS
12991+ .fill 512,8,0
12992+ .endr
12993+#endif
12994+
12995 NEXT_PAGE(level3_ident_pgt)
12996 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12997+#ifdef CONFIG_XEN
12998 .fill 511,8,0
12999+#else
13000+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
13001+ .fill 510,8,0
13002+#endif
13003+
13004+NEXT_PAGE(level3_vmalloc_pgt)
13005+ .fill 512,8,0
13006+
13007+NEXT_PAGE(level3_vmemmap_pgt)
13008+ .fill L3_VMEMMAP_START,8,0
13009+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13010
13011 NEXT_PAGE(level3_kernel_pgt)
13012 .fill L3_START_KERNEL,8,0
13013@@ -363,20 +387,23 @@ NEXT_PAGE(level3_kernel_pgt)
13014 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
13015 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13016
13017+NEXT_PAGE(level2_vmemmap_pgt)
13018+ .fill 512,8,0
13019+
13020 NEXT_PAGE(level2_fixmap_pgt)
13021- .fill 506,8,0
13022- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13023- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
13024- .fill 5,8,0
13025+ .fill 507,8,0
13026+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
13027+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
13028+ .fill 4,8,0
13029
13030-NEXT_PAGE(level1_fixmap_pgt)
13031+NEXT_PAGE(level1_vsyscall_pgt)
13032 .fill 512,8,0
13033
13034-NEXT_PAGE(level2_ident_pgt)
13035- /* Since I easily can, map the first 1G.
13036+ /* Since I easily can, map the first 2G.
13037 * Don't set NX because code runs from these pages.
13038 */
13039- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
13040+NEXT_PAGE(level2_ident_pgt)
13041+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
13042
13043 NEXT_PAGE(level2_kernel_pgt)
13044 /*
13045@@ -389,33 +416,55 @@ NEXT_PAGE(level2_kernel_pgt)
13046 * If you want to increase this then increase MODULES_VADDR
13047 * too.)
13048 */
13049- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
13050- KERNEL_IMAGE_SIZE/PMD_SIZE)
13051-
13052-NEXT_PAGE(level2_spare_pgt)
13053- .fill 512, 8, 0
13054+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
13055
13056 #undef PMDS
13057 #undef NEXT_PAGE
13058
13059- .data
13060+ .align PAGE_SIZE
13061+ENTRY(cpu_gdt_table)
13062+ .rept NR_CPUS
13063+ .quad 0x0000000000000000 /* NULL descriptor */
13064+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
13065+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
13066+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
13067+ .quad 0x00cffb000000ffff /* __USER32_CS */
13068+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
13069+ .quad 0x00affb000000ffff /* __USER_CS */
13070+
13071+#ifdef CONFIG_PAX_KERNEXEC
13072+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
13073+#else
13074+ .quad 0x0 /* unused */
13075+#endif
13076+
13077+ .quad 0,0 /* TSS */
13078+ .quad 0,0 /* LDT */
13079+ .quad 0,0,0 /* three TLS descriptors */
13080+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
13081+ /* asm/segment.h:GDT_ENTRIES must match this */
13082+
13083+ /* zero the remaining page */
13084+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
13085+ .endr
13086+
13087 .align 16
13088 .globl early_gdt_descr
13089 early_gdt_descr:
13090 .word GDT_ENTRIES*8-1
13091 early_gdt_descr_base:
13092- .quad INIT_PER_CPU_VAR(gdt_page)
13093+ .quad cpu_gdt_table
13094
13095 ENTRY(phys_base)
13096 /* This must match the first entry in level2_kernel_pgt */
13097 .quad 0x0000000000000000
13098
13099 #include "../../x86/xen/xen-head.S"
13100-
13101- .section .bss, "aw", @nobits
13102+
13103+ .section .rodata,"a",@progbits
13104 .align L1_CACHE_BYTES
13105 ENTRY(idt_table)
13106- .skip IDT_ENTRIES * 16
13107+ .fill 512,8,0
13108
13109 __PAGE_ALIGNED_BSS
13110 .align PAGE_SIZE
13111diff -urNp linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c
13112--- linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c 2011-07-21 22:17:23.000000000 -0400
13113+++ linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c 2011-08-23 21:47:55.000000000 -0400
13114@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
13115 EXPORT_SYMBOL(cmpxchg8b_emu);
13116 #endif
13117
13118+EXPORT_SYMBOL_GPL(cpu_gdt_table);
13119+
13120 /* Networking helper routines. */
13121 EXPORT_SYMBOL(csum_partial_copy_generic);
13122+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
13123+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
13124
13125 EXPORT_SYMBOL(__get_user_1);
13126 EXPORT_SYMBOL(__get_user_2);
13127@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
13128
13129 EXPORT_SYMBOL(csum_partial);
13130 EXPORT_SYMBOL(empty_zero_page);
13131+
13132+#ifdef CONFIG_PAX_KERNEXEC
13133+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
13134+#endif
13135diff -urNp linux-3.0.4/arch/x86/kernel/i8259.c linux-3.0.4/arch/x86/kernel/i8259.c
13136--- linux-3.0.4/arch/x86/kernel/i8259.c 2011-07-21 22:17:23.000000000 -0400
13137+++ linux-3.0.4/arch/x86/kernel/i8259.c 2011-08-23 21:47:55.000000000 -0400
13138@@ -210,7 +210,7 @@ spurious_8259A_irq:
13139 "spurious 8259A interrupt: IRQ%d.\n", irq);
13140 spurious_irq_mask |= irqmask;
13141 }
13142- atomic_inc(&irq_err_count);
13143+ atomic_inc_unchecked(&irq_err_count);
13144 /*
13145 * Theoretically we do not have to handle this IRQ,
13146 * but in Linux this does not cause problems and is
13147diff -urNp linux-3.0.4/arch/x86/kernel/init_task.c linux-3.0.4/arch/x86/kernel/init_task.c
13148--- linux-3.0.4/arch/x86/kernel/init_task.c 2011-07-21 22:17:23.000000000 -0400
13149+++ linux-3.0.4/arch/x86/kernel/init_task.c 2011-08-23 21:47:55.000000000 -0400
13150@@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
13151 * way process stacks are handled. This is done by having a special
13152 * "init_task" linker map entry..
13153 */
13154-union thread_union init_thread_union __init_task_data =
13155- { INIT_THREAD_INFO(init_task) };
13156+union thread_union init_thread_union __init_task_data;
13157
13158 /*
13159 * Initial task structure.
13160@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
13161 * section. Since TSS's are completely CPU-local, we want them
13162 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
13163 */
13164-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
13165-
13166+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
13167+EXPORT_SYMBOL(init_tss);
13168diff -urNp linux-3.0.4/arch/x86/kernel/ioport.c linux-3.0.4/arch/x86/kernel/ioport.c
13169--- linux-3.0.4/arch/x86/kernel/ioport.c 2011-07-21 22:17:23.000000000 -0400
13170+++ linux-3.0.4/arch/x86/kernel/ioport.c 2011-08-23 21:48:14.000000000 -0400
13171@@ -6,6 +6,7 @@
13172 #include <linux/sched.h>
13173 #include <linux/kernel.h>
13174 #include <linux/capability.h>
13175+#include <linux/security.h>
13176 #include <linux/errno.h>
13177 #include <linux/types.h>
13178 #include <linux/ioport.h>
13179@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long
13180
13181 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
13182 return -EINVAL;
13183+#ifdef CONFIG_GRKERNSEC_IO
13184+ if (turn_on && grsec_disable_privio) {
13185+ gr_handle_ioperm();
13186+ return -EPERM;
13187+ }
13188+#endif
13189 if (turn_on && !capable(CAP_SYS_RAWIO))
13190 return -EPERM;
13191
13192@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long
13193 * because the ->io_bitmap_max value must match the bitmap
13194 * contents:
13195 */
13196- tss = &per_cpu(init_tss, get_cpu());
13197+ tss = init_tss + get_cpu();
13198
13199 if (turn_on)
13200 bitmap_clear(t->io_bitmap_ptr, from, num);
13201@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct
13202 return -EINVAL;
13203 /* Trying to gain more privileges? */
13204 if (level > old) {
13205+#ifdef CONFIG_GRKERNSEC_IO
13206+ if (grsec_disable_privio) {
13207+ gr_handle_iopl();
13208+ return -EPERM;
13209+ }
13210+#endif
13211 if (!capable(CAP_SYS_RAWIO))
13212 return -EPERM;
13213 }
13214diff -urNp linux-3.0.4/arch/x86/kernel/irq_32.c linux-3.0.4/arch/x86/kernel/irq_32.c
13215--- linux-3.0.4/arch/x86/kernel/irq_32.c 2011-07-21 22:17:23.000000000 -0400
13216+++ linux-3.0.4/arch/x86/kernel/irq_32.c 2011-08-23 21:47:55.000000000 -0400
13217@@ -36,7 +36,7 @@ static int check_stack_overflow(void)
13218 __asm__ __volatile__("andl %%esp,%0" :
13219 "=r" (sp) : "0" (THREAD_SIZE - 1));
13220
13221- return sp < (sizeof(struct thread_info) + STACK_WARN);
13222+ return sp < STACK_WARN;
13223 }
13224
13225 static void print_stack_overflow(void)
13226@@ -54,8 +54,8 @@ static inline void print_stack_overflow(
13227 * per-CPU IRQ handling contexts (thread information and stack)
13228 */
13229 union irq_ctx {
13230- struct thread_info tinfo;
13231- u32 stack[THREAD_SIZE/sizeof(u32)];
13232+ unsigned long previous_esp;
13233+ u32 stack[THREAD_SIZE/sizeof(u32)];
13234 } __attribute__((aligned(THREAD_SIZE)));
13235
13236 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
13237@@ -75,10 +75,9 @@ static void call_on_stack(void *func, vo
13238 static inline int
13239 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
13240 {
13241- union irq_ctx *curctx, *irqctx;
13242+ union irq_ctx *irqctx;
13243 u32 *isp, arg1, arg2;
13244
13245- curctx = (union irq_ctx *) current_thread_info();
13246 irqctx = __this_cpu_read(hardirq_ctx);
13247
13248 /*
13249@@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struc
13250 * handler) we can't do that and just have to keep using the
13251 * current stack (which is the irq stack already after all)
13252 */
13253- if (unlikely(curctx == irqctx))
13254+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
13255 return 0;
13256
13257 /* build the stack frame on the IRQ stack */
13258- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13259- irqctx->tinfo.task = curctx->tinfo.task;
13260- irqctx->tinfo.previous_esp = current_stack_pointer;
13261+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13262+ irqctx->previous_esp = current_stack_pointer;
13263
13264- /*
13265- * Copy the softirq bits in preempt_count so that the
13266- * softirq checks work in the hardirq context.
13267- */
13268- irqctx->tinfo.preempt_count =
13269- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
13270- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
13271+#ifdef CONFIG_PAX_MEMORY_UDEREF
13272+ __set_fs(MAKE_MM_SEG(0));
13273+#endif
13274
13275 if (unlikely(overflow))
13276 call_on_stack(print_stack_overflow, isp);
13277@@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struc
13278 : "0" (irq), "1" (desc), "2" (isp),
13279 "D" (desc->handle_irq)
13280 : "memory", "cc", "ecx");
13281+
13282+#ifdef CONFIG_PAX_MEMORY_UDEREF
13283+ __set_fs(current_thread_info()->addr_limit);
13284+#endif
13285+
13286 return 1;
13287 }
13288
13289@@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struc
13290 */
13291 void __cpuinit irq_ctx_init(int cpu)
13292 {
13293- union irq_ctx *irqctx;
13294-
13295 if (per_cpu(hardirq_ctx, cpu))
13296 return;
13297
13298- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13299- THREAD_FLAGS,
13300- THREAD_ORDER));
13301- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13302- irqctx->tinfo.cpu = cpu;
13303- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
13304- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13305-
13306- per_cpu(hardirq_ctx, cpu) = irqctx;
13307-
13308- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13309- THREAD_FLAGS,
13310- THREAD_ORDER));
13311- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13312- irqctx->tinfo.cpu = cpu;
13313- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13314-
13315- per_cpu(softirq_ctx, cpu) = irqctx;
13316+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13317+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13318
13319 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
13320 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
13321@@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
13322 asmlinkage void do_softirq(void)
13323 {
13324 unsigned long flags;
13325- struct thread_info *curctx;
13326 union irq_ctx *irqctx;
13327 u32 *isp;
13328
13329@@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
13330 local_irq_save(flags);
13331
13332 if (local_softirq_pending()) {
13333- curctx = current_thread_info();
13334 irqctx = __this_cpu_read(softirq_ctx);
13335- irqctx->tinfo.task = curctx->task;
13336- irqctx->tinfo.previous_esp = current_stack_pointer;
13337+ irqctx->previous_esp = current_stack_pointer;
13338
13339 /* build the stack frame on the softirq stack */
13340- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13341+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13342+
13343+#ifdef CONFIG_PAX_MEMORY_UDEREF
13344+ __set_fs(MAKE_MM_SEG(0));
13345+#endif
13346
13347 call_on_stack(__do_softirq, isp);
13348+
13349+#ifdef CONFIG_PAX_MEMORY_UDEREF
13350+ __set_fs(current_thread_info()->addr_limit);
13351+#endif
13352+
13353 /*
13354 * Shouldn't happen, we returned above if in_interrupt():
13355 */
13356diff -urNp linux-3.0.4/arch/x86/kernel/irq.c linux-3.0.4/arch/x86/kernel/irq.c
13357--- linux-3.0.4/arch/x86/kernel/irq.c 2011-07-21 22:17:23.000000000 -0400
13358+++ linux-3.0.4/arch/x86/kernel/irq.c 2011-08-23 21:47:55.000000000 -0400
13359@@ -17,7 +17,7 @@
13360 #include <asm/mce.h>
13361 #include <asm/hw_irq.h>
13362
13363-atomic_t irq_err_count;
13364+atomic_unchecked_t irq_err_count;
13365
13366 /* Function pointer for generic interrupt vector handling */
13367 void (*x86_platform_ipi_callback)(void) = NULL;
13368@@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file
13369 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
13370 seq_printf(p, " Machine check polls\n");
13371 #endif
13372- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
13373+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
13374 #if defined(CONFIG_X86_IO_APIC)
13375- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
13376+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
13377 #endif
13378 return 0;
13379 }
13380@@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
13381
13382 u64 arch_irq_stat(void)
13383 {
13384- u64 sum = atomic_read(&irq_err_count);
13385+ u64 sum = atomic_read_unchecked(&irq_err_count);
13386
13387 #ifdef CONFIG_X86_IO_APIC
13388- sum += atomic_read(&irq_mis_count);
13389+ sum += atomic_read_unchecked(&irq_mis_count);
13390 #endif
13391 return sum;
13392 }
13393diff -urNp linux-3.0.4/arch/x86/kernel/kgdb.c linux-3.0.4/arch/x86/kernel/kgdb.c
13394--- linux-3.0.4/arch/x86/kernel/kgdb.c 2011-07-21 22:17:23.000000000 -0400
13395+++ linux-3.0.4/arch/x86/kernel/kgdb.c 2011-08-23 21:47:55.000000000 -0400
13396@@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem,
13397 #ifdef CONFIG_X86_32
13398 switch (regno) {
13399 case GDB_SS:
13400- if (!user_mode_vm(regs))
13401+ if (!user_mode(regs))
13402 *(unsigned long *)mem = __KERNEL_DS;
13403 break;
13404 case GDB_SP:
13405- if (!user_mode_vm(regs))
13406+ if (!user_mode(regs))
13407 *(unsigned long *)mem = kernel_stack_pointer(regs);
13408 break;
13409 case GDB_GS:
13410@@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vec
13411 case 'k':
13412 /* clear the trace bit */
13413 linux_regs->flags &= ~X86_EFLAGS_TF;
13414- atomic_set(&kgdb_cpu_doing_single_step, -1);
13415+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
13416
13417 /* set the trace bit if we're stepping */
13418 if (remcomInBuffer[0] == 's') {
13419 linux_regs->flags |= X86_EFLAGS_TF;
13420- atomic_set(&kgdb_cpu_doing_single_step,
13421+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
13422 raw_smp_processor_id());
13423 }
13424
13425@@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args
13426 return NOTIFY_DONE;
13427
13428 case DIE_DEBUG:
13429- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
13430+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
13431 if (user_mode(regs))
13432 return single_step_cont(regs, args);
13433 break;
13434diff -urNp linux-3.0.4/arch/x86/kernel/kprobes.c linux-3.0.4/arch/x86/kernel/kprobes.c
13435--- linux-3.0.4/arch/x86/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
13436+++ linux-3.0.4/arch/x86/kernel/kprobes.c 2011-08-23 21:47:55.000000000 -0400
13437@@ -115,8 +115,11 @@ static void __kprobes __synthesize_relat
13438 } __attribute__((packed)) *insn;
13439
13440 insn = (struct __arch_relative_insn *)from;
13441+
13442+ pax_open_kernel();
13443 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
13444 insn->op = op;
13445+ pax_close_kernel();
13446 }
13447
13448 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
13449@@ -153,7 +156,7 @@ static int __kprobes can_boost(kprobe_op
13450 kprobe_opcode_t opcode;
13451 kprobe_opcode_t *orig_opcodes = opcodes;
13452
13453- if (search_exception_tables((unsigned long)opcodes))
13454+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
13455 return 0; /* Page fault may occur on this address. */
13456
13457 retry:
13458@@ -314,7 +317,9 @@ static int __kprobes __copy_instruction(
13459 }
13460 }
13461 insn_get_length(&insn);
13462+ pax_open_kernel();
13463 memcpy(dest, insn.kaddr, insn.length);
13464+ pax_close_kernel();
13465
13466 #ifdef CONFIG_X86_64
13467 if (insn_rip_relative(&insn)) {
13468@@ -338,7 +343,9 @@ static int __kprobes __copy_instruction(
13469 (u8 *) dest;
13470 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
13471 disp = (u8 *) dest + insn_offset_displacement(&insn);
13472+ pax_open_kernel();
13473 *(s32 *) disp = (s32) newdisp;
13474+ pax_close_kernel();
13475 }
13476 #endif
13477 return insn.length;
13478@@ -352,12 +359,12 @@ static void __kprobes arch_copy_kprobe(s
13479 */
13480 __copy_instruction(p->ainsn.insn, p->addr, 0);
13481
13482- if (can_boost(p->addr))
13483+ if (can_boost(ktla_ktva(p->addr)))
13484 p->ainsn.boostable = 0;
13485 else
13486 p->ainsn.boostable = -1;
13487
13488- p->opcode = *p->addr;
13489+ p->opcode = *(ktla_ktva(p->addr));
13490 }
13491
13492 int __kprobes arch_prepare_kprobe(struct kprobe *p)
13493@@ -474,7 +481,7 @@ static void __kprobes setup_singlestep(s
13494 * nor set current_kprobe, because it doesn't use single
13495 * stepping.
13496 */
13497- regs->ip = (unsigned long)p->ainsn.insn;
13498+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13499 preempt_enable_no_resched();
13500 return;
13501 }
13502@@ -493,7 +500,7 @@ static void __kprobes setup_singlestep(s
13503 if (p->opcode == BREAKPOINT_INSTRUCTION)
13504 regs->ip = (unsigned long)p->addr;
13505 else
13506- regs->ip = (unsigned long)p->ainsn.insn;
13507+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13508 }
13509
13510 /*
13511@@ -572,7 +579,7 @@ static int __kprobes kprobe_handler(stru
13512 setup_singlestep(p, regs, kcb, 0);
13513 return 1;
13514 }
13515- } else if (*addr != BREAKPOINT_INSTRUCTION) {
13516+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
13517 /*
13518 * The breakpoint instruction was removed right
13519 * after we hit it. Another cpu has removed
13520@@ -817,7 +824,7 @@ static void __kprobes resume_execution(s
13521 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
13522 {
13523 unsigned long *tos = stack_addr(regs);
13524- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
13525+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
13526 unsigned long orig_ip = (unsigned long)p->addr;
13527 kprobe_opcode_t *insn = p->ainsn.insn;
13528
13529@@ -999,7 +1006,7 @@ int __kprobes kprobe_exceptions_notify(s
13530 struct die_args *args = data;
13531 int ret = NOTIFY_DONE;
13532
13533- if (args->regs && user_mode_vm(args->regs))
13534+ if (args->regs && user_mode(args->regs))
13535 return ret;
13536
13537 switch (val) {
13538@@ -1381,7 +1388,7 @@ int __kprobes arch_prepare_optimized_kpr
13539 * Verify if the address gap is in 2GB range, because this uses
13540 * a relative jump.
13541 */
13542- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
13543+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
13544 if (abs(rel) > 0x7fffffff)
13545 return -ERANGE;
13546
13547@@ -1402,11 +1409,11 @@ int __kprobes arch_prepare_optimized_kpr
13548 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
13549
13550 /* Set probe function call */
13551- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
13552+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
13553
13554 /* Set returning jmp instruction at the tail of out-of-line buffer */
13555 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
13556- (u8 *)op->kp.addr + op->optinsn.size);
13557+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
13558
13559 flush_icache_range((unsigned long) buf,
13560 (unsigned long) buf + TMPL_END_IDX +
13561@@ -1428,7 +1435,7 @@ static void __kprobes setup_optimize_kpr
13562 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
13563
13564 /* Backup instructions which will be replaced by jump address */
13565- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
13566+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
13567 RELATIVE_ADDR_SIZE);
13568
13569 insn_buf[0] = RELATIVEJUMP_OPCODE;
13570diff -urNp linux-3.0.4/arch/x86/kernel/kvm.c linux-3.0.4/arch/x86/kernel/kvm.c
13571--- linux-3.0.4/arch/x86/kernel/kvm.c 2011-07-21 22:17:23.000000000 -0400
13572+++ linux-3.0.4/arch/x86/kernel/kvm.c 2011-08-24 18:10:12.000000000 -0400
13573@@ -426,6 +426,7 @@ static void __init paravirt_ops_setup(vo
13574 pv_mmu_ops.set_pud = kvm_set_pud;
13575 #if PAGETABLE_LEVELS == 4
13576 pv_mmu_ops.set_pgd = kvm_set_pgd;
13577+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
13578 #endif
13579 #endif
13580 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
13581diff -urNp linux-3.0.4/arch/x86/kernel/ldt.c linux-3.0.4/arch/x86/kernel/ldt.c
13582--- linux-3.0.4/arch/x86/kernel/ldt.c 2011-07-21 22:17:23.000000000 -0400
13583+++ linux-3.0.4/arch/x86/kernel/ldt.c 2011-08-23 21:47:55.000000000 -0400
13584@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
13585 if (reload) {
13586 #ifdef CONFIG_SMP
13587 preempt_disable();
13588- load_LDT(pc);
13589+ load_LDT_nolock(pc);
13590 if (!cpumask_equal(mm_cpumask(current->mm),
13591 cpumask_of(smp_processor_id())))
13592 smp_call_function(flush_ldt, current->mm, 1);
13593 preempt_enable();
13594 #else
13595- load_LDT(pc);
13596+ load_LDT_nolock(pc);
13597 #endif
13598 }
13599 if (oldsize) {
13600@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t
13601 return err;
13602
13603 for (i = 0; i < old->size; i++)
13604- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
13605+ write_ldt_entry(new->ldt, i, old->ldt + i);
13606 return 0;
13607 }
13608
13609@@ -116,6 +116,24 @@ int init_new_context(struct task_struct
13610 retval = copy_ldt(&mm->context, &old_mm->context);
13611 mutex_unlock(&old_mm->context.lock);
13612 }
13613+
13614+ if (tsk == current) {
13615+ mm->context.vdso = 0;
13616+
13617+#ifdef CONFIG_X86_32
13618+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
13619+ mm->context.user_cs_base = 0UL;
13620+ mm->context.user_cs_limit = ~0UL;
13621+
13622+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13623+ cpus_clear(mm->context.cpu_user_cs_mask);
13624+#endif
13625+
13626+#endif
13627+#endif
13628+
13629+ }
13630+
13631 return retval;
13632 }
13633
13634@@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
13635 }
13636 }
13637
13638+#ifdef CONFIG_PAX_SEGMEXEC
13639+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
13640+ error = -EINVAL;
13641+ goto out_unlock;
13642+ }
13643+#endif
13644+
13645 fill_ldt(&ldt, &ldt_info);
13646 if (oldmode)
13647 ldt.avl = 0;
13648diff -urNp linux-3.0.4/arch/x86/kernel/machine_kexec_32.c linux-3.0.4/arch/x86/kernel/machine_kexec_32.c
13649--- linux-3.0.4/arch/x86/kernel/machine_kexec_32.c 2011-07-21 22:17:23.000000000 -0400
13650+++ linux-3.0.4/arch/x86/kernel/machine_kexec_32.c 2011-08-23 21:47:55.000000000 -0400
13651@@ -27,7 +27,7 @@
13652 #include <asm/cacheflush.h>
13653 #include <asm/debugreg.h>
13654
13655-static void set_idt(void *newidt, __u16 limit)
13656+static void set_idt(struct desc_struct *newidt, __u16 limit)
13657 {
13658 struct desc_ptr curidt;
13659
13660@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16
13661 }
13662
13663
13664-static void set_gdt(void *newgdt, __u16 limit)
13665+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
13666 {
13667 struct desc_ptr curgdt;
13668
13669@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
13670 }
13671
13672 control_page = page_address(image->control_code_page);
13673- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
13674+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
13675
13676 relocate_kernel_ptr = control_page;
13677 page_list[PA_CONTROL_PAGE] = __pa(control_page);
13678diff -urNp linux-3.0.4/arch/x86/kernel/microcode_intel.c linux-3.0.4/arch/x86/kernel/microcode_intel.c
13679--- linux-3.0.4/arch/x86/kernel/microcode_intel.c 2011-07-21 22:17:23.000000000 -0400
13680+++ linux-3.0.4/arch/x86/kernel/microcode_intel.c 2011-08-23 21:47:55.000000000 -0400
13681@@ -440,13 +440,13 @@ static enum ucode_state request_microcod
13682
13683 static int get_ucode_user(void *to, const void *from, size_t n)
13684 {
13685- return copy_from_user(to, from, n);
13686+ return copy_from_user(to, (__force const void __user *)from, n);
13687 }
13688
13689 static enum ucode_state
13690 request_microcode_user(int cpu, const void __user *buf, size_t size)
13691 {
13692- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
13693+ return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
13694 }
13695
13696 static void microcode_fini_cpu(int cpu)
13697diff -urNp linux-3.0.4/arch/x86/kernel/module.c linux-3.0.4/arch/x86/kernel/module.c
13698--- linux-3.0.4/arch/x86/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
13699+++ linux-3.0.4/arch/x86/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
13700@@ -36,21 +36,66 @@
13701 #define DEBUGP(fmt...)
13702 #endif
13703
13704-void *module_alloc(unsigned long size)
13705+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
13706 {
13707 if (PAGE_ALIGN(size) > MODULES_LEN)
13708 return NULL;
13709 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
13710- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
13711+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
13712 -1, __builtin_return_address(0));
13713 }
13714
13715+void *module_alloc(unsigned long size)
13716+{
13717+
13718+#ifdef CONFIG_PAX_KERNEXEC
13719+ return __module_alloc(size, PAGE_KERNEL);
13720+#else
13721+ return __module_alloc(size, PAGE_KERNEL_EXEC);
13722+#endif
13723+
13724+}
13725+
13726 /* Free memory returned from module_alloc */
13727 void module_free(struct module *mod, void *module_region)
13728 {
13729 vfree(module_region);
13730 }
13731
13732+#ifdef CONFIG_PAX_KERNEXEC
13733+#ifdef CONFIG_X86_32
13734+void *module_alloc_exec(unsigned long size)
13735+{
13736+ struct vm_struct *area;
13737+
13738+ if (size == 0)
13739+ return NULL;
13740+
13741+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
13742+ return area ? area->addr : NULL;
13743+}
13744+EXPORT_SYMBOL(module_alloc_exec);
13745+
13746+void module_free_exec(struct module *mod, void *module_region)
13747+{
13748+ vunmap(module_region);
13749+}
13750+EXPORT_SYMBOL(module_free_exec);
13751+#else
13752+void module_free_exec(struct module *mod, void *module_region)
13753+{
13754+ module_free(mod, module_region);
13755+}
13756+EXPORT_SYMBOL(module_free_exec);
13757+
13758+void *module_alloc_exec(unsigned long size)
13759+{
13760+ return __module_alloc(size, PAGE_KERNEL_RX);
13761+}
13762+EXPORT_SYMBOL(module_alloc_exec);
13763+#endif
13764+#endif
13765+
13766 /* We don't need anything special. */
13767 int module_frob_arch_sections(Elf_Ehdr *hdr,
13768 Elf_Shdr *sechdrs,
13769@@ -70,14 +115,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13770 unsigned int i;
13771 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
13772 Elf32_Sym *sym;
13773- uint32_t *location;
13774+ uint32_t *plocation, location;
13775
13776 DEBUGP("Applying relocate section %u to %u\n", relsec,
13777 sechdrs[relsec].sh_info);
13778 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
13779 /* This is where to make the change */
13780- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
13781- + rel[i].r_offset;
13782+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
13783+ location = (uint32_t)plocation;
13784+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
13785+ plocation = ktla_ktva((void *)plocation);
13786 /* This is the symbol it is referring to. Note that all
13787 undefined symbols have been resolved. */
13788 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
13789@@ -86,11 +133,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13790 switch (ELF32_R_TYPE(rel[i].r_info)) {
13791 case R_386_32:
13792 /* We add the value into the location given */
13793- *location += sym->st_value;
13794+ pax_open_kernel();
13795+ *plocation += sym->st_value;
13796+ pax_close_kernel();
13797 break;
13798 case R_386_PC32:
13799 /* Add the value, subtract its postition */
13800- *location += sym->st_value - (uint32_t)location;
13801+ pax_open_kernel();
13802+ *plocation += sym->st_value - location;
13803+ pax_close_kernel();
13804 break;
13805 default:
13806 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
13807@@ -146,21 +197,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
13808 case R_X86_64_NONE:
13809 break;
13810 case R_X86_64_64:
13811+ pax_open_kernel();
13812 *(u64 *)loc = val;
13813+ pax_close_kernel();
13814 break;
13815 case R_X86_64_32:
13816+ pax_open_kernel();
13817 *(u32 *)loc = val;
13818+ pax_close_kernel();
13819 if (val != *(u32 *)loc)
13820 goto overflow;
13821 break;
13822 case R_X86_64_32S:
13823+ pax_open_kernel();
13824 *(s32 *)loc = val;
13825+ pax_close_kernel();
13826 if ((s64)val != *(s32 *)loc)
13827 goto overflow;
13828 break;
13829 case R_X86_64_PC32:
13830 val -= (u64)loc;
13831+ pax_open_kernel();
13832 *(u32 *)loc = val;
13833+ pax_close_kernel();
13834+
13835 #if 0
13836 if ((s64)val != *(s32 *)loc)
13837 goto overflow;
13838diff -urNp linux-3.0.4/arch/x86/kernel/paravirt.c linux-3.0.4/arch/x86/kernel/paravirt.c
13839--- linux-3.0.4/arch/x86/kernel/paravirt.c 2011-07-21 22:17:23.000000000 -0400
13840+++ linux-3.0.4/arch/x86/kernel/paravirt.c 2011-08-23 21:48:14.000000000 -0400
13841@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
13842 {
13843 return x;
13844 }
13845+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13846+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
13847+#endif
13848
13849 void __init default_banner(void)
13850 {
13851@@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
13852 * corresponding structure. */
13853 static void *get_call_destination(u8 type)
13854 {
13855- struct paravirt_patch_template tmpl = {
13856+ const struct paravirt_patch_template tmpl = {
13857 .pv_init_ops = pv_init_ops,
13858 .pv_time_ops = pv_time_ops,
13859 .pv_cpu_ops = pv_cpu_ops,
13860@@ -133,6 +136,9 @@ static void *get_call_destination(u8 typ
13861 .pv_lock_ops = pv_lock_ops,
13862 #endif
13863 };
13864+
13865+ pax_track_stack();
13866+
13867 return *((void **)&tmpl + type);
13868 }
13869
13870@@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type,
13871 if (opfunc == NULL)
13872 /* If there's no function, patch it with a ud2a (BUG) */
13873 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
13874- else if (opfunc == _paravirt_nop)
13875+ else if (opfunc == (void *)_paravirt_nop)
13876 /* If the operation is a nop, then nop the callsite */
13877 ret = paravirt_patch_nop();
13878
13879 /* identity functions just return their single argument */
13880- else if (opfunc == _paravirt_ident_32)
13881+ else if (opfunc == (void *)_paravirt_ident_32)
13882 ret = paravirt_patch_ident_32(insnbuf, len);
13883- else if (opfunc == _paravirt_ident_64)
13884+ else if (opfunc == (void *)_paravirt_ident_64)
13885 ret = paravirt_patch_ident_64(insnbuf, len);
13886+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13887+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
13888+ ret = paravirt_patch_ident_64(insnbuf, len);
13889+#endif
13890
13891 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
13892 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
13893@@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insn
13894 if (insn_len > len || start == NULL)
13895 insn_len = len;
13896 else
13897- memcpy(insnbuf, start, insn_len);
13898+ memcpy(insnbuf, ktla_ktva(start), insn_len);
13899
13900 return insn_len;
13901 }
13902@@ -294,22 +304,22 @@ void arch_flush_lazy_mmu_mode(void)
13903 preempt_enable();
13904 }
13905
13906-struct pv_info pv_info = {
13907+struct pv_info pv_info __read_only = {
13908 .name = "bare hardware",
13909 .paravirt_enabled = 0,
13910 .kernel_rpl = 0,
13911 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
13912 };
13913
13914-struct pv_init_ops pv_init_ops = {
13915+struct pv_init_ops pv_init_ops __read_only = {
13916 .patch = native_patch,
13917 };
13918
13919-struct pv_time_ops pv_time_ops = {
13920+struct pv_time_ops pv_time_ops __read_only = {
13921 .sched_clock = native_sched_clock,
13922 };
13923
13924-struct pv_irq_ops pv_irq_ops = {
13925+struct pv_irq_ops pv_irq_ops __read_only = {
13926 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
13927 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
13928 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
13929@@ -321,7 +331,7 @@ struct pv_irq_ops pv_irq_ops = {
13930 #endif
13931 };
13932
13933-struct pv_cpu_ops pv_cpu_ops = {
13934+struct pv_cpu_ops pv_cpu_ops __read_only = {
13935 .cpuid = native_cpuid,
13936 .get_debugreg = native_get_debugreg,
13937 .set_debugreg = native_set_debugreg,
13938@@ -382,21 +392,26 @@ struct pv_cpu_ops pv_cpu_ops = {
13939 .end_context_switch = paravirt_nop,
13940 };
13941
13942-struct pv_apic_ops pv_apic_ops = {
13943+struct pv_apic_ops pv_apic_ops __read_only = {
13944 #ifdef CONFIG_X86_LOCAL_APIC
13945 .startup_ipi_hook = paravirt_nop,
13946 #endif
13947 };
13948
13949-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
13950+#ifdef CONFIG_X86_32
13951+#ifdef CONFIG_X86_PAE
13952+/* 64-bit pagetable entries */
13953+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
13954+#else
13955 /* 32-bit pagetable entries */
13956 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
13957+#endif
13958 #else
13959 /* 64-bit pagetable entries */
13960 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
13961 #endif
13962
13963-struct pv_mmu_ops pv_mmu_ops = {
13964+struct pv_mmu_ops pv_mmu_ops __read_only = {
13965
13966 .read_cr2 = native_read_cr2,
13967 .write_cr2 = native_write_cr2,
13968@@ -446,6 +461,7 @@ struct pv_mmu_ops pv_mmu_ops = {
13969 .make_pud = PTE_IDENT,
13970
13971 .set_pgd = native_set_pgd,
13972+ .set_pgd_batched = native_set_pgd_batched,
13973 #endif
13974 #endif /* PAGETABLE_LEVELS >= 3 */
13975
13976@@ -465,6 +481,12 @@ struct pv_mmu_ops pv_mmu_ops = {
13977 },
13978
13979 .set_fixmap = native_set_fixmap,
13980+
13981+#ifdef CONFIG_PAX_KERNEXEC
13982+ .pax_open_kernel = native_pax_open_kernel,
13983+ .pax_close_kernel = native_pax_close_kernel,
13984+#endif
13985+
13986 };
13987
13988 EXPORT_SYMBOL_GPL(pv_time_ops);
13989diff -urNp linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c
13990--- linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c 2011-07-21 22:17:23.000000000 -0400
13991+++ linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c 2011-08-23 21:47:55.000000000 -0400
13992@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
13993 arch_spin_lock(lock);
13994 }
13995
13996-struct pv_lock_ops pv_lock_ops = {
13997+struct pv_lock_ops pv_lock_ops __read_only = {
13998 #ifdef CONFIG_SMP
13999 .spin_is_locked = __ticket_spin_is_locked,
14000 .spin_is_contended = __ticket_spin_is_contended,
14001diff -urNp linux-3.0.4/arch/x86/kernel/pci-iommu_table.c linux-3.0.4/arch/x86/kernel/pci-iommu_table.c
14002--- linux-3.0.4/arch/x86/kernel/pci-iommu_table.c 2011-07-21 22:17:23.000000000 -0400
14003+++ linux-3.0.4/arch/x86/kernel/pci-iommu_table.c 2011-08-23 21:48:14.000000000 -0400
14004@@ -2,7 +2,7 @@
14005 #include <asm/iommu_table.h>
14006 #include <linux/string.h>
14007 #include <linux/kallsyms.h>
14008-
14009+#include <linux/sched.h>
14010
14011 #define DEBUG 1
14012
14013@@ -51,6 +51,8 @@ void __init check_iommu_entries(struct i
14014 {
14015 struct iommu_table_entry *p, *q, *x;
14016
14017+ pax_track_stack();
14018+
14019 /* Simple cyclic dependency checker. */
14020 for (p = start; p < finish; p++) {
14021 q = find_dependents_of(start, finish, p);
14022diff -urNp linux-3.0.4/arch/x86/kernel/process_32.c linux-3.0.4/arch/x86/kernel/process_32.c
14023--- linux-3.0.4/arch/x86/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
14024+++ linux-3.0.4/arch/x86/kernel/process_32.c 2011-08-23 21:47:55.000000000 -0400
14025@@ -65,6 +65,7 @@ asmlinkage void ret_from_fork(void) __as
14026 unsigned long thread_saved_pc(struct task_struct *tsk)
14027 {
14028 return ((unsigned long *)tsk->thread.sp)[3];
14029+//XXX return tsk->thread.eip;
14030 }
14031
14032 #ifndef CONFIG_SMP
14033@@ -126,15 +127,14 @@ void __show_regs(struct pt_regs *regs, i
14034 unsigned long sp;
14035 unsigned short ss, gs;
14036
14037- if (user_mode_vm(regs)) {
14038+ if (user_mode(regs)) {
14039 sp = regs->sp;
14040 ss = regs->ss & 0xffff;
14041- gs = get_user_gs(regs);
14042 } else {
14043 sp = kernel_stack_pointer(regs);
14044 savesegment(ss, ss);
14045- savesegment(gs, gs);
14046 }
14047+ gs = get_user_gs(regs);
14048
14049 show_regs_common();
14050
14051@@ -196,13 +196,14 @@ int copy_thread(unsigned long clone_flag
14052 struct task_struct *tsk;
14053 int err;
14054
14055- childregs = task_pt_regs(p);
14056+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
14057 *childregs = *regs;
14058 childregs->ax = 0;
14059 childregs->sp = sp;
14060
14061 p->thread.sp = (unsigned long) childregs;
14062 p->thread.sp0 = (unsigned long) (childregs+1);
14063+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14064
14065 p->thread.ip = (unsigned long) ret_from_fork;
14066
14067@@ -292,7 +293,7 @@ __switch_to(struct task_struct *prev_p,
14068 struct thread_struct *prev = &prev_p->thread,
14069 *next = &next_p->thread;
14070 int cpu = smp_processor_id();
14071- struct tss_struct *tss = &per_cpu(init_tss, cpu);
14072+ struct tss_struct *tss = init_tss + cpu;
14073 bool preload_fpu;
14074
14075 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
14076@@ -327,6 +328,10 @@ __switch_to(struct task_struct *prev_p,
14077 */
14078 lazy_save_gs(prev->gs);
14079
14080+#ifdef CONFIG_PAX_MEMORY_UDEREF
14081+ __set_fs(task_thread_info(next_p)->addr_limit);
14082+#endif
14083+
14084 /*
14085 * Load the per-thread Thread-Local Storage descriptor.
14086 */
14087@@ -362,6 +367,9 @@ __switch_to(struct task_struct *prev_p,
14088 */
14089 arch_end_context_switch(next_p);
14090
14091+ percpu_write(current_task, next_p);
14092+ percpu_write(current_tinfo, &next_p->tinfo);
14093+
14094 if (preload_fpu)
14095 __math_state_restore();
14096
14097@@ -371,8 +379,6 @@ __switch_to(struct task_struct *prev_p,
14098 if (prev->gs | next->gs)
14099 lazy_load_gs(next->gs);
14100
14101- percpu_write(current_task, next_p);
14102-
14103 return prev_p;
14104 }
14105
14106@@ -402,4 +408,3 @@ unsigned long get_wchan(struct task_stru
14107 } while (count++ < 16);
14108 return 0;
14109 }
14110-
14111diff -urNp linux-3.0.4/arch/x86/kernel/process_64.c linux-3.0.4/arch/x86/kernel/process_64.c
14112--- linux-3.0.4/arch/x86/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
14113+++ linux-3.0.4/arch/x86/kernel/process_64.c 2011-08-23 21:47:55.000000000 -0400
14114@@ -87,7 +87,7 @@ static void __exit_idle(void)
14115 void exit_idle(void)
14116 {
14117 /* idle loop has pid 0 */
14118- if (current->pid)
14119+ if (task_pid_nr(current))
14120 return;
14121 __exit_idle();
14122 }
14123@@ -260,8 +260,7 @@ int copy_thread(unsigned long clone_flag
14124 struct pt_regs *childregs;
14125 struct task_struct *me = current;
14126
14127- childregs = ((struct pt_regs *)
14128- (THREAD_SIZE + task_stack_page(p))) - 1;
14129+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
14130 *childregs = *regs;
14131
14132 childregs->ax = 0;
14133@@ -273,6 +272,7 @@ int copy_thread(unsigned long clone_flag
14134 p->thread.sp = (unsigned long) childregs;
14135 p->thread.sp0 = (unsigned long) (childregs+1);
14136 p->thread.usersp = me->thread.usersp;
14137+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14138
14139 set_tsk_thread_flag(p, TIF_FORK);
14140
14141@@ -375,7 +375,7 @@ __switch_to(struct task_struct *prev_p,
14142 struct thread_struct *prev = &prev_p->thread;
14143 struct thread_struct *next = &next_p->thread;
14144 int cpu = smp_processor_id();
14145- struct tss_struct *tss = &per_cpu(init_tss, cpu);
14146+ struct tss_struct *tss = init_tss + cpu;
14147 unsigned fsindex, gsindex;
14148 bool preload_fpu;
14149
14150@@ -471,10 +471,9 @@ __switch_to(struct task_struct *prev_p,
14151 prev->usersp = percpu_read(old_rsp);
14152 percpu_write(old_rsp, next->usersp);
14153 percpu_write(current_task, next_p);
14154+ percpu_write(current_tinfo, &next_p->tinfo);
14155
14156- percpu_write(kernel_stack,
14157- (unsigned long)task_stack_page(next_p) +
14158- THREAD_SIZE - KERNEL_STACK_OFFSET);
14159+ percpu_write(kernel_stack, next->sp0);
14160
14161 /*
14162 * Now maybe reload the debug registers and handle I/O bitmaps
14163@@ -536,12 +535,11 @@ unsigned long get_wchan(struct task_stru
14164 if (!p || p == current || p->state == TASK_RUNNING)
14165 return 0;
14166 stack = (unsigned long)task_stack_page(p);
14167- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
14168+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
14169 return 0;
14170 fp = *(u64 *)(p->thread.sp);
14171 do {
14172- if (fp < (unsigned long)stack ||
14173- fp >= (unsigned long)stack+THREAD_SIZE)
14174+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
14175 return 0;
14176 ip = *(u64 *)(fp+8);
14177 if (!in_sched_functions(ip))
14178diff -urNp linux-3.0.4/arch/x86/kernel/process.c linux-3.0.4/arch/x86/kernel/process.c
14179--- linux-3.0.4/arch/x86/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
14180+++ linux-3.0.4/arch/x86/kernel/process.c 2011-08-30 18:23:52.000000000 -0400
14181@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_stru
14182
14183 void free_thread_info(struct thread_info *ti)
14184 {
14185- free_thread_xstate(ti->task);
14186 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
14187 }
14188
14189+static struct kmem_cache *task_struct_cachep;
14190+
14191 void arch_task_cache_init(void)
14192 {
14193- task_xstate_cachep =
14194- kmem_cache_create("task_xstate", xstate_size,
14195+ /* create a slab on which task_structs can be allocated */
14196+ task_struct_cachep =
14197+ kmem_cache_create("task_struct", sizeof(struct task_struct),
14198+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
14199+
14200+ task_xstate_cachep =
14201+ kmem_cache_create("task_xstate", xstate_size,
14202 __alignof__(union thread_xstate),
14203- SLAB_PANIC | SLAB_NOTRACK, NULL);
14204+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
14205+}
14206+
14207+struct task_struct *alloc_task_struct_node(int node)
14208+{
14209+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
14210+}
14211+
14212+void free_task_struct(struct task_struct *task)
14213+{
14214+ free_thread_xstate(task);
14215+ kmem_cache_free(task_struct_cachep, task);
14216 }
14217
14218 /*
14219@@ -70,7 +87,7 @@ void exit_thread(void)
14220 unsigned long *bp = t->io_bitmap_ptr;
14221
14222 if (bp) {
14223- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
14224+ struct tss_struct *tss = init_tss + get_cpu();
14225
14226 t->io_bitmap_ptr = NULL;
14227 clear_thread_flag(TIF_IO_BITMAP);
14228@@ -106,7 +123,7 @@ void show_regs_common(void)
14229
14230 printk(KERN_CONT "\n");
14231 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
14232- current->pid, current->comm, print_tainted(),
14233+ task_pid_nr(current), current->comm, print_tainted(),
14234 init_utsname()->release,
14235 (int)strcspn(init_utsname()->version, " "),
14236 init_utsname()->version);
14237@@ -120,6 +137,9 @@ void flush_thread(void)
14238 {
14239 struct task_struct *tsk = current;
14240
14241+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
14242+ loadsegment(gs, 0);
14243+#endif
14244 flush_ptrace_hw_breakpoint(tsk);
14245 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
14246 /*
14247@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), voi
14248 regs.di = (unsigned long) arg;
14249
14250 #ifdef CONFIG_X86_32
14251- regs.ds = __USER_DS;
14252- regs.es = __USER_DS;
14253+ regs.ds = __KERNEL_DS;
14254+ regs.es = __KERNEL_DS;
14255 regs.fs = __KERNEL_PERCPU;
14256- regs.gs = __KERNEL_STACK_CANARY;
14257+ savesegment(gs, regs.gs);
14258 #else
14259 regs.ss = __KERNEL_DS;
14260 #endif
14261@@ -403,7 +423,7 @@ void default_idle(void)
14262 EXPORT_SYMBOL(default_idle);
14263 #endif
14264
14265-void stop_this_cpu(void *dummy)
14266+__noreturn void stop_this_cpu(void *dummy)
14267 {
14268 local_irq_disable();
14269 /*
14270@@ -668,16 +688,37 @@ static int __init idle_setup(char *str)
14271 }
14272 early_param("idle", idle_setup);
14273
14274-unsigned long arch_align_stack(unsigned long sp)
14275+#ifdef CONFIG_PAX_RANDKSTACK
14276+void pax_randomize_kstack(struct pt_regs *regs)
14277 {
14278- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
14279- sp -= get_random_int() % 8192;
14280- return sp & ~0xf;
14281-}
14282+ struct thread_struct *thread = &current->thread;
14283+ unsigned long time;
14284
14285-unsigned long arch_randomize_brk(struct mm_struct *mm)
14286-{
14287- unsigned long range_end = mm->brk + 0x02000000;
14288- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
14289-}
14290+ if (!randomize_va_space)
14291+ return;
14292+
14293+ if (v8086_mode(regs))
14294+ return;
14295
14296+ rdtscl(time);
14297+
14298+ /* P4 seems to return a 0 LSB, ignore it */
14299+#ifdef CONFIG_MPENTIUM4
14300+ time &= 0x3EUL;
14301+ time <<= 2;
14302+#elif defined(CONFIG_X86_64)
14303+ time &= 0xFUL;
14304+ time <<= 4;
14305+#else
14306+ time &= 0x1FUL;
14307+ time <<= 3;
14308+#endif
14309+
14310+ thread->sp0 ^= time;
14311+ load_sp0(init_tss + smp_processor_id(), thread);
14312+
14313+#ifdef CONFIG_X86_64
14314+ percpu_write(kernel_stack, thread->sp0);
14315+#endif
14316+}
14317+#endif
14318diff -urNp linux-3.0.4/arch/x86/kernel/ptrace.c linux-3.0.4/arch/x86/kernel/ptrace.c
14319--- linux-3.0.4/arch/x86/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
14320+++ linux-3.0.4/arch/x86/kernel/ptrace.c 2011-08-23 21:47:55.000000000 -0400
14321@@ -821,7 +821,7 @@ long arch_ptrace(struct task_struct *chi
14322 unsigned long addr, unsigned long data)
14323 {
14324 int ret;
14325- unsigned long __user *datap = (unsigned long __user *)data;
14326+ unsigned long __user *datap = (__force unsigned long __user *)data;
14327
14328 switch (request) {
14329 /* read the word at location addr in the USER area. */
14330@@ -906,14 +906,14 @@ long arch_ptrace(struct task_struct *chi
14331 if ((int) addr < 0)
14332 return -EIO;
14333 ret = do_get_thread_area(child, addr,
14334- (struct user_desc __user *)data);
14335+ (__force struct user_desc __user *) data);
14336 break;
14337
14338 case PTRACE_SET_THREAD_AREA:
14339 if ((int) addr < 0)
14340 return -EIO;
14341 ret = do_set_thread_area(child, addr,
14342- (struct user_desc __user *)data, 0);
14343+ (__force struct user_desc __user *) data, 0);
14344 break;
14345 #endif
14346
14347@@ -1330,7 +1330,7 @@ static void fill_sigtrap_info(struct tas
14348 memset(info, 0, sizeof(*info));
14349 info->si_signo = SIGTRAP;
14350 info->si_code = si_code;
14351- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
14352+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
14353 }
14354
14355 void user_single_step_siginfo(struct task_struct *tsk,
14356diff -urNp linux-3.0.4/arch/x86/kernel/pvclock.c linux-3.0.4/arch/x86/kernel/pvclock.c
14357--- linux-3.0.4/arch/x86/kernel/pvclock.c 2011-07-21 22:17:23.000000000 -0400
14358+++ linux-3.0.4/arch/x86/kernel/pvclock.c 2011-08-23 21:47:55.000000000 -0400
14359@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvc
14360 return pv_tsc_khz;
14361 }
14362
14363-static atomic64_t last_value = ATOMIC64_INIT(0);
14364+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
14365
14366 void pvclock_resume(void)
14367 {
14368- atomic64_set(&last_value, 0);
14369+ atomic64_set_unchecked(&last_value, 0);
14370 }
14371
14372 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
14373@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct
14374 * updating at the same time, and one of them could be slightly behind,
14375 * making the assumption that last_value always go forward fail to hold.
14376 */
14377- last = atomic64_read(&last_value);
14378+ last = atomic64_read_unchecked(&last_value);
14379 do {
14380 if (ret < last)
14381 return last;
14382- last = atomic64_cmpxchg(&last_value, last, ret);
14383+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
14384 } while (unlikely(last != ret));
14385
14386 return ret;
14387diff -urNp linux-3.0.4/arch/x86/kernel/reboot.c linux-3.0.4/arch/x86/kernel/reboot.c
14388--- linux-3.0.4/arch/x86/kernel/reboot.c 2011-07-21 22:17:23.000000000 -0400
14389+++ linux-3.0.4/arch/x86/kernel/reboot.c 2011-08-23 21:47:55.000000000 -0400
14390@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
14391 EXPORT_SYMBOL(pm_power_off);
14392
14393 static const struct desc_ptr no_idt = {};
14394-static int reboot_mode;
14395+static unsigned short reboot_mode;
14396 enum reboot_type reboot_type = BOOT_ACPI;
14397 int reboot_force;
14398
14399@@ -315,13 +315,17 @@ core_initcall(reboot_init);
14400 extern const unsigned char machine_real_restart_asm[];
14401 extern const u64 machine_real_restart_gdt[3];
14402
14403-void machine_real_restart(unsigned int type)
14404+__noreturn void machine_real_restart(unsigned int type)
14405 {
14406 void *restart_va;
14407 unsigned long restart_pa;
14408- void (*restart_lowmem)(unsigned int);
14409+ void (* __noreturn restart_lowmem)(unsigned int);
14410 u64 *lowmem_gdt;
14411
14412+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14413+ struct desc_struct *gdt;
14414+#endif
14415+
14416 local_irq_disable();
14417
14418 /* Write zero to CMOS register number 0x0f, which the BIOS POST
14419@@ -347,14 +351,14 @@ void machine_real_restart(unsigned int t
14420 boot)". This seems like a fairly standard thing that gets set by
14421 REBOOT.COM programs, and the previous reset routine did this
14422 too. */
14423- *((unsigned short *)0x472) = reboot_mode;
14424+ *(unsigned short *)(__va(0x472)) = reboot_mode;
14425
14426 /* Patch the GDT in the low memory trampoline */
14427 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
14428
14429 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
14430 restart_pa = virt_to_phys(restart_va);
14431- restart_lowmem = (void (*)(unsigned int))restart_pa;
14432+ restart_lowmem = (void *)restart_pa;
14433
14434 /* GDT[0]: GDT self-pointer */
14435 lowmem_gdt[0] =
14436@@ -365,7 +369,33 @@ void machine_real_restart(unsigned int t
14437 GDT_ENTRY(0x009b, restart_pa, 0xffff);
14438
14439 /* Jump to the identity-mapped low memory code */
14440+
14441+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14442+ gdt = get_cpu_gdt_table(smp_processor_id());
14443+ pax_open_kernel();
14444+#ifdef CONFIG_PAX_MEMORY_UDEREF
14445+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
14446+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
14447+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
14448+#endif
14449+#ifdef CONFIG_PAX_KERNEXEC
14450+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
14451+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
14452+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
14453+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
14454+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
14455+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
14456+#endif
14457+ pax_close_kernel();
14458+#endif
14459+
14460+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14461+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
14462+ unreachable();
14463+#else
14464 restart_lowmem(type);
14465+#endif
14466+
14467 }
14468 #ifdef CONFIG_APM_MODULE
14469 EXPORT_SYMBOL(machine_real_restart);
14470@@ -523,7 +553,7 @@ void __attribute__((weak)) mach_reboot_f
14471 * try to force a triple fault and then cycle between hitting the keyboard
14472 * controller and doing that
14473 */
14474-static void native_machine_emergency_restart(void)
14475+__noreturn static void native_machine_emergency_restart(void)
14476 {
14477 int i;
14478 int attempt = 0;
14479@@ -647,13 +677,13 @@ void native_machine_shutdown(void)
14480 #endif
14481 }
14482
14483-static void __machine_emergency_restart(int emergency)
14484+static __noreturn void __machine_emergency_restart(int emergency)
14485 {
14486 reboot_emergency = emergency;
14487 machine_ops.emergency_restart();
14488 }
14489
14490-static void native_machine_restart(char *__unused)
14491+static __noreturn void native_machine_restart(char *__unused)
14492 {
14493 printk("machine restart\n");
14494
14495@@ -662,7 +692,7 @@ static void native_machine_restart(char
14496 __machine_emergency_restart(0);
14497 }
14498
14499-static void native_machine_halt(void)
14500+static __noreturn void native_machine_halt(void)
14501 {
14502 /* stop other cpus and apics */
14503 machine_shutdown();
14504@@ -673,7 +703,7 @@ static void native_machine_halt(void)
14505 stop_this_cpu(NULL);
14506 }
14507
14508-static void native_machine_power_off(void)
14509+__noreturn static void native_machine_power_off(void)
14510 {
14511 if (pm_power_off) {
14512 if (!reboot_force)
14513@@ -682,6 +712,7 @@ static void native_machine_power_off(voi
14514 }
14515 /* a fallback in case there is no PM info available */
14516 tboot_shutdown(TB_SHUTDOWN_HALT);
14517+ unreachable();
14518 }
14519
14520 struct machine_ops machine_ops = {
14521diff -urNp linux-3.0.4/arch/x86/kernel/setup.c linux-3.0.4/arch/x86/kernel/setup.c
14522--- linux-3.0.4/arch/x86/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
14523+++ linux-3.0.4/arch/x86/kernel/setup.c 2011-08-23 21:47:55.000000000 -0400
14524@@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
14525 * area (640->1Mb) as ram even though it is not.
14526 * take them out.
14527 */
14528- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
14529+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
14530 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
14531 }
14532
14533@@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
14534
14535 if (!boot_params.hdr.root_flags)
14536 root_mountflags &= ~MS_RDONLY;
14537- init_mm.start_code = (unsigned long) _text;
14538- init_mm.end_code = (unsigned long) _etext;
14539+ init_mm.start_code = ktla_ktva((unsigned long) _text);
14540+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
14541 init_mm.end_data = (unsigned long) _edata;
14542 init_mm.brk = _brk_end;
14543
14544- code_resource.start = virt_to_phys(_text);
14545- code_resource.end = virt_to_phys(_etext)-1;
14546- data_resource.start = virt_to_phys(_etext);
14547+ code_resource.start = virt_to_phys(ktla_ktva(_text));
14548+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
14549+ data_resource.start = virt_to_phys(_sdata);
14550 data_resource.end = virt_to_phys(_edata)-1;
14551 bss_resource.start = virt_to_phys(&__bss_start);
14552 bss_resource.end = virt_to_phys(&__bss_stop)-1;
14553diff -urNp linux-3.0.4/arch/x86/kernel/setup_percpu.c linux-3.0.4/arch/x86/kernel/setup_percpu.c
14554--- linux-3.0.4/arch/x86/kernel/setup_percpu.c 2011-07-21 22:17:23.000000000 -0400
14555+++ linux-3.0.4/arch/x86/kernel/setup_percpu.c 2011-08-23 21:47:55.000000000 -0400
14556@@ -21,19 +21,17 @@
14557 #include <asm/cpu.h>
14558 #include <asm/stackprotector.h>
14559
14560-DEFINE_PER_CPU(int, cpu_number);
14561+#ifdef CONFIG_SMP
14562+DEFINE_PER_CPU(unsigned int, cpu_number);
14563 EXPORT_PER_CPU_SYMBOL(cpu_number);
14564+#endif
14565
14566-#ifdef CONFIG_X86_64
14567 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
14568-#else
14569-#define BOOT_PERCPU_OFFSET 0
14570-#endif
14571
14572 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
14573 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
14574
14575-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
14576+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
14577 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
14578 };
14579 EXPORT_SYMBOL(__per_cpu_offset);
14580@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
14581 {
14582 #ifdef CONFIG_X86_32
14583 struct desc_struct gdt;
14584+ unsigned long base = per_cpu_offset(cpu);
14585
14586- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
14587- 0x2 | DESCTYPE_S, 0x8);
14588- gdt.s = 1;
14589+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
14590+ 0x83 | DESCTYPE_S, 0xC);
14591 write_gdt_entry(get_cpu_gdt_table(cpu),
14592 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
14593 #endif
14594@@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
14595 /* alrighty, percpu areas up and running */
14596 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
14597 for_each_possible_cpu(cpu) {
14598+#ifdef CONFIG_CC_STACKPROTECTOR
14599+#ifdef CONFIG_X86_32
14600+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
14601+#endif
14602+#endif
14603 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
14604 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
14605 per_cpu(cpu_number, cpu) = cpu;
14606@@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
14607 */
14608 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
14609 #endif
14610+#ifdef CONFIG_CC_STACKPROTECTOR
14611+#ifdef CONFIG_X86_32
14612+ if (!cpu)
14613+ per_cpu(stack_canary.canary, cpu) = canary;
14614+#endif
14615+#endif
14616 /*
14617 * Up to this point, the boot CPU has been using .init.data
14618 * area. Reload any changed state for the boot CPU.
14619diff -urNp linux-3.0.4/arch/x86/kernel/signal.c linux-3.0.4/arch/x86/kernel/signal.c
14620--- linux-3.0.4/arch/x86/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
14621+++ linux-3.0.4/arch/x86/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
14622@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
14623 * Align the stack pointer according to the i386 ABI,
14624 * i.e. so that on function entry ((sp + 4) & 15) == 0.
14625 */
14626- sp = ((sp + 4) & -16ul) - 4;
14627+ sp = ((sp - 12) & -16ul) - 4;
14628 #else /* !CONFIG_X86_32 */
14629 sp = round_down(sp, 16) - 8;
14630 #endif
14631@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
14632 * Return an always-bogus address instead so we will die with SIGSEGV.
14633 */
14634 if (onsigstack && !likely(on_sig_stack(sp)))
14635- return (void __user *)-1L;
14636+ return (__force void __user *)-1L;
14637
14638 /* save i387 state */
14639 if (used_math() && save_i387_xstate(*fpstate) < 0)
14640- return (void __user *)-1L;
14641+ return (__force void __user *)-1L;
14642
14643 return (void __user *)sp;
14644 }
14645@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
14646 }
14647
14648 if (current->mm->context.vdso)
14649- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14650+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14651 else
14652- restorer = &frame->retcode;
14653+ restorer = (void __user *)&frame->retcode;
14654 if (ka->sa.sa_flags & SA_RESTORER)
14655 restorer = ka->sa.sa_restorer;
14656
14657@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
14658 * reasons and because gdb uses it as a signature to notice
14659 * signal handler stack frames.
14660 */
14661- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
14662+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
14663
14664 if (err)
14665 return -EFAULT;
14666@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, str
14667 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
14668
14669 /* Set up to return from userspace. */
14670- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14671+ if (current->mm->context.vdso)
14672+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14673+ else
14674+ restorer = (void __user *)&frame->retcode;
14675 if (ka->sa.sa_flags & SA_RESTORER)
14676 restorer = ka->sa.sa_restorer;
14677 put_user_ex(restorer, &frame->pretcode);
14678@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, str
14679 * reasons and because gdb uses it as a signature to notice
14680 * signal handler stack frames.
14681 */
14682- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
14683+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
14684 } put_user_catch(err);
14685
14686 if (err)
14687@@ -769,6 +772,8 @@ static void do_signal(struct pt_regs *re
14688 int signr;
14689 sigset_t *oldset;
14690
14691+ pax_track_stack();
14692+
14693 /*
14694 * We want the common case to go fast, which is why we may in certain
14695 * cases get here from kernel mode. Just return without doing anything
14696@@ -776,7 +781,7 @@ static void do_signal(struct pt_regs *re
14697 * X86_32: vm86 regs switched out by assembly code before reaching
14698 * here, so testing against kernel CS suffices.
14699 */
14700- if (!user_mode(regs))
14701+ if (!user_mode_novm(regs))
14702 return;
14703
14704 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
14705diff -urNp linux-3.0.4/arch/x86/kernel/smpboot.c linux-3.0.4/arch/x86/kernel/smpboot.c
14706--- linux-3.0.4/arch/x86/kernel/smpboot.c 2011-07-21 22:17:23.000000000 -0400
14707+++ linux-3.0.4/arch/x86/kernel/smpboot.c 2011-08-23 21:47:55.000000000 -0400
14708@@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int api
14709 set_idle_for_cpu(cpu, c_idle.idle);
14710 do_rest:
14711 per_cpu(current_task, cpu) = c_idle.idle;
14712+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
14713 #ifdef CONFIG_X86_32
14714 /* Stack for startup_32 can be just as for start_secondary onwards */
14715 irq_ctx_init(cpu);
14716 #else
14717 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
14718 initial_gs = per_cpu_offset(cpu);
14719- per_cpu(kernel_stack, cpu) =
14720- (unsigned long)task_stack_page(c_idle.idle) -
14721- KERNEL_STACK_OFFSET + THREAD_SIZE;
14722+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
14723 #endif
14724+
14725+ pax_open_kernel();
14726 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14727+ pax_close_kernel();
14728+
14729 initial_code = (unsigned long)start_secondary;
14730 stack_start = c_idle.idle->thread.sp;
14731
14732@@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int
14733
14734 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
14735
14736+#ifdef CONFIG_PAX_PER_CPU_PGD
14737+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
14738+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14739+ KERNEL_PGD_PTRS);
14740+#endif
14741+
14742 err = do_boot_cpu(apicid, cpu);
14743 if (err) {
14744 pr_debug("do_boot_cpu failed %d\n", err);
14745diff -urNp linux-3.0.4/arch/x86/kernel/step.c linux-3.0.4/arch/x86/kernel/step.c
14746--- linux-3.0.4/arch/x86/kernel/step.c 2011-07-21 22:17:23.000000000 -0400
14747+++ linux-3.0.4/arch/x86/kernel/step.c 2011-08-23 21:47:55.000000000 -0400
14748@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
14749 struct desc_struct *desc;
14750 unsigned long base;
14751
14752- seg &= ~7UL;
14753+ seg >>= 3;
14754
14755 mutex_lock(&child->mm->context.lock);
14756- if (unlikely((seg >> 3) >= child->mm->context.size))
14757+ if (unlikely(seg >= child->mm->context.size))
14758 addr = -1L; /* bogus selector, access would fault */
14759 else {
14760 desc = child->mm->context.ldt + seg;
14761@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
14762 addr += base;
14763 }
14764 mutex_unlock(&child->mm->context.lock);
14765- }
14766+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
14767+ addr = ktla_ktva(addr);
14768
14769 return addr;
14770 }
14771@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
14772 unsigned char opcode[15];
14773 unsigned long addr = convert_ip_to_linear(child, regs);
14774
14775+ if (addr == -EINVAL)
14776+ return 0;
14777+
14778 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
14779 for (i = 0; i < copied; i++) {
14780 switch (opcode[i]) {
14781@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
14782
14783 #ifdef CONFIG_X86_64
14784 case 0x40 ... 0x4f:
14785- if (regs->cs != __USER_CS)
14786+ if ((regs->cs & 0xffff) != __USER_CS)
14787 /* 32-bit mode: register increment */
14788 return 0;
14789 /* 64-bit mode: REX prefix */
14790diff -urNp linux-3.0.4/arch/x86/kernel/syscall_table_32.S linux-3.0.4/arch/x86/kernel/syscall_table_32.S
14791--- linux-3.0.4/arch/x86/kernel/syscall_table_32.S 2011-07-21 22:17:23.000000000 -0400
14792+++ linux-3.0.4/arch/x86/kernel/syscall_table_32.S 2011-08-23 21:47:55.000000000 -0400
14793@@ -1,3 +1,4 @@
14794+.section .rodata,"a",@progbits
14795 ENTRY(sys_call_table)
14796 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
14797 .long sys_exit
14798diff -urNp linux-3.0.4/arch/x86/kernel/sys_i386_32.c linux-3.0.4/arch/x86/kernel/sys_i386_32.c
14799--- linux-3.0.4/arch/x86/kernel/sys_i386_32.c 2011-07-21 22:17:23.000000000 -0400
14800+++ linux-3.0.4/arch/x86/kernel/sys_i386_32.c 2011-08-23 21:47:55.000000000 -0400
14801@@ -24,17 +24,224 @@
14802
14803 #include <asm/syscalls.h>
14804
14805-/*
14806- * Do a system call from kernel instead of calling sys_execve so we
14807- * end up with proper pt_regs.
14808- */
14809-int kernel_execve(const char *filename,
14810- const char *const argv[],
14811- const char *const envp[])
14812+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
14813 {
14814- long __res;
14815- asm volatile ("int $0x80"
14816- : "=a" (__res)
14817- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
14818- return __res;
14819+ unsigned long pax_task_size = TASK_SIZE;
14820+
14821+#ifdef CONFIG_PAX_SEGMEXEC
14822+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
14823+ pax_task_size = SEGMEXEC_TASK_SIZE;
14824+#endif
14825+
14826+ if (len > pax_task_size || addr > pax_task_size - len)
14827+ return -EINVAL;
14828+
14829+ return 0;
14830+}
14831+
14832+unsigned long
14833+arch_get_unmapped_area(struct file *filp, unsigned long addr,
14834+ unsigned long len, unsigned long pgoff, unsigned long flags)
14835+{
14836+ struct mm_struct *mm = current->mm;
14837+ struct vm_area_struct *vma;
14838+ unsigned long start_addr, pax_task_size = TASK_SIZE;
14839+
14840+#ifdef CONFIG_PAX_SEGMEXEC
14841+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
14842+ pax_task_size = SEGMEXEC_TASK_SIZE;
14843+#endif
14844+
14845+ pax_task_size -= PAGE_SIZE;
14846+
14847+ if (len > pax_task_size)
14848+ return -ENOMEM;
14849+
14850+ if (flags & MAP_FIXED)
14851+ return addr;
14852+
14853+#ifdef CONFIG_PAX_RANDMMAP
14854+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14855+#endif
14856+
14857+ if (addr) {
14858+ addr = PAGE_ALIGN(addr);
14859+ if (pax_task_size - len >= addr) {
14860+ vma = find_vma(mm, addr);
14861+ if (check_heap_stack_gap(vma, addr, len))
14862+ return addr;
14863+ }
14864+ }
14865+ if (len > mm->cached_hole_size) {
14866+ start_addr = addr = mm->free_area_cache;
14867+ } else {
14868+ start_addr = addr = mm->mmap_base;
14869+ mm->cached_hole_size = 0;
14870+ }
14871+
14872+#ifdef CONFIG_PAX_PAGEEXEC
14873+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
14874+ start_addr = 0x00110000UL;
14875+
14876+#ifdef CONFIG_PAX_RANDMMAP
14877+ if (mm->pax_flags & MF_PAX_RANDMMAP)
14878+ start_addr += mm->delta_mmap & 0x03FFF000UL;
14879+#endif
14880+
14881+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
14882+ start_addr = addr = mm->mmap_base;
14883+ else
14884+ addr = start_addr;
14885+ }
14886+#endif
14887+
14888+full_search:
14889+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
14890+ /* At this point: (!vma || addr < vma->vm_end). */
14891+ if (pax_task_size - len < addr) {
14892+ /*
14893+ * Start a new search - just in case we missed
14894+ * some holes.
14895+ */
14896+ if (start_addr != mm->mmap_base) {
14897+ start_addr = addr = mm->mmap_base;
14898+ mm->cached_hole_size = 0;
14899+ goto full_search;
14900+ }
14901+ return -ENOMEM;
14902+ }
14903+ if (check_heap_stack_gap(vma, addr, len))
14904+ break;
14905+ if (addr + mm->cached_hole_size < vma->vm_start)
14906+ mm->cached_hole_size = vma->vm_start - addr;
14907+ addr = vma->vm_end;
14908+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
14909+ start_addr = addr = mm->mmap_base;
14910+ mm->cached_hole_size = 0;
14911+ goto full_search;
14912+ }
14913+ }
14914+
14915+ /*
14916+ * Remember the place where we stopped the search:
14917+ */
14918+ mm->free_area_cache = addr + len;
14919+ return addr;
14920+}
14921+
14922+unsigned long
14923+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
14924+ const unsigned long len, const unsigned long pgoff,
14925+ const unsigned long flags)
14926+{
14927+ struct vm_area_struct *vma;
14928+ struct mm_struct *mm = current->mm;
14929+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
14930+
14931+#ifdef CONFIG_PAX_SEGMEXEC
14932+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
14933+ pax_task_size = SEGMEXEC_TASK_SIZE;
14934+#endif
14935+
14936+ pax_task_size -= PAGE_SIZE;
14937+
14938+ /* requested length too big for entire address space */
14939+ if (len > pax_task_size)
14940+ return -ENOMEM;
14941+
14942+ if (flags & MAP_FIXED)
14943+ return addr;
14944+
14945+#ifdef CONFIG_PAX_PAGEEXEC
14946+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
14947+ goto bottomup;
14948+#endif
14949+
14950+#ifdef CONFIG_PAX_RANDMMAP
14951+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14952+#endif
14953+
14954+ /* requesting a specific address */
14955+ if (addr) {
14956+ addr = PAGE_ALIGN(addr);
14957+ if (pax_task_size - len >= addr) {
14958+ vma = find_vma(mm, addr);
14959+ if (check_heap_stack_gap(vma, addr, len))
14960+ return addr;
14961+ }
14962+ }
14963+
14964+ /* check if free_area_cache is useful for us */
14965+ if (len <= mm->cached_hole_size) {
14966+ mm->cached_hole_size = 0;
14967+ mm->free_area_cache = mm->mmap_base;
14968+ }
14969+
14970+ /* either no address requested or can't fit in requested address hole */
14971+ addr = mm->free_area_cache;
14972+
14973+ /* make sure it can fit in the remaining address space */
14974+ if (addr > len) {
14975+ vma = find_vma(mm, addr-len);
14976+ if (check_heap_stack_gap(vma, addr - len, len))
14977+ /* remember the address as a hint for next time */
14978+ return (mm->free_area_cache = addr-len);
14979+ }
14980+
14981+ if (mm->mmap_base < len)
14982+ goto bottomup;
14983+
14984+ addr = mm->mmap_base-len;
14985+
14986+ do {
14987+ /*
14988+ * Lookup failure means no vma is above this address,
14989+ * else if new region fits below vma->vm_start,
14990+ * return with success:
14991+ */
14992+ vma = find_vma(mm, addr);
14993+ if (check_heap_stack_gap(vma, addr, len))
14994+ /* remember the address as a hint for next time */
14995+ return (mm->free_area_cache = addr);
14996+
14997+ /* remember the largest hole we saw so far */
14998+ if (addr + mm->cached_hole_size < vma->vm_start)
14999+ mm->cached_hole_size = vma->vm_start - addr;
15000+
15001+ /* try just below the current vma->vm_start */
15002+ addr = skip_heap_stack_gap(vma, len);
15003+ } while (!IS_ERR_VALUE(addr));
15004+
15005+bottomup:
15006+ /*
15007+ * A failed mmap() very likely causes application failure,
15008+ * so fall back to the bottom-up function here. This scenario
15009+ * can happen with large stack limits and large mmap()
15010+ * allocations.
15011+ */
15012+
15013+#ifdef CONFIG_PAX_SEGMEXEC
15014+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
15015+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
15016+ else
15017+#endif
15018+
15019+ mm->mmap_base = TASK_UNMAPPED_BASE;
15020+
15021+#ifdef CONFIG_PAX_RANDMMAP
15022+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15023+ mm->mmap_base += mm->delta_mmap;
15024+#endif
15025+
15026+ mm->free_area_cache = mm->mmap_base;
15027+ mm->cached_hole_size = ~0UL;
15028+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15029+ /*
15030+ * Restore the topdown base:
15031+ */
15032+ mm->mmap_base = base;
15033+ mm->free_area_cache = base;
15034+ mm->cached_hole_size = ~0UL;
15035+
15036+ return addr;
15037 }
15038diff -urNp linux-3.0.4/arch/x86/kernel/sys_x86_64.c linux-3.0.4/arch/x86/kernel/sys_x86_64.c
15039--- linux-3.0.4/arch/x86/kernel/sys_x86_64.c 2011-07-21 22:17:23.000000000 -0400
15040+++ linux-3.0.4/arch/x86/kernel/sys_x86_64.c 2011-08-23 21:47:55.000000000 -0400
15041@@ -32,8 +32,8 @@ out:
15042 return error;
15043 }
15044
15045-static void find_start_end(unsigned long flags, unsigned long *begin,
15046- unsigned long *end)
15047+static void find_start_end(struct mm_struct *mm, unsigned long flags,
15048+ unsigned long *begin, unsigned long *end)
15049 {
15050 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
15051 unsigned long new_begin;
15052@@ -52,7 +52,7 @@ static void find_start_end(unsigned long
15053 *begin = new_begin;
15054 }
15055 } else {
15056- *begin = TASK_UNMAPPED_BASE;
15057+ *begin = mm->mmap_base;
15058 *end = TASK_SIZE;
15059 }
15060 }
15061@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
15062 if (flags & MAP_FIXED)
15063 return addr;
15064
15065- find_start_end(flags, &begin, &end);
15066+ find_start_end(mm, flags, &begin, &end);
15067
15068 if (len > end)
15069 return -ENOMEM;
15070
15071+#ifdef CONFIG_PAX_RANDMMAP
15072+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15073+#endif
15074+
15075 if (addr) {
15076 addr = PAGE_ALIGN(addr);
15077 vma = find_vma(mm, addr);
15078- if (end - len >= addr &&
15079- (!vma || addr + len <= vma->vm_start))
15080+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
15081 return addr;
15082 }
15083 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
15084@@ -106,7 +109,7 @@ full_search:
15085 }
15086 return -ENOMEM;
15087 }
15088- if (!vma || addr + len <= vma->vm_start) {
15089+ if (check_heap_stack_gap(vma, addr, len)) {
15090 /*
15091 * Remember the place where we stopped the search:
15092 */
15093@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
15094 {
15095 struct vm_area_struct *vma;
15096 struct mm_struct *mm = current->mm;
15097- unsigned long addr = addr0;
15098+ unsigned long base = mm->mmap_base, addr = addr0;
15099
15100 /* requested length too big for entire address space */
15101 if (len > TASK_SIZE)
15102@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
15103 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
15104 goto bottomup;
15105
15106+#ifdef CONFIG_PAX_RANDMMAP
15107+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15108+#endif
15109+
15110 /* requesting a specific address */
15111 if (addr) {
15112 addr = PAGE_ALIGN(addr);
15113- vma = find_vma(mm, addr);
15114- if (TASK_SIZE - len >= addr &&
15115- (!vma || addr + len <= vma->vm_start))
15116- return addr;
15117+ if (TASK_SIZE - len >= addr) {
15118+ vma = find_vma(mm, addr);
15119+ if (check_heap_stack_gap(vma, addr, len))
15120+ return addr;
15121+ }
15122 }
15123
15124 /* check if free_area_cache is useful for us */
15125@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
15126 /* make sure it can fit in the remaining address space */
15127 if (addr > len) {
15128 vma = find_vma(mm, addr-len);
15129- if (!vma || addr <= vma->vm_start)
15130+ if (check_heap_stack_gap(vma, addr - len, len))
15131 /* remember the address as a hint for next time */
15132 return mm->free_area_cache = addr-len;
15133 }
15134@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
15135 * return with success:
15136 */
15137 vma = find_vma(mm, addr);
15138- if (!vma || addr+len <= vma->vm_start)
15139+ if (check_heap_stack_gap(vma, addr, len))
15140 /* remember the address as a hint for next time */
15141 return mm->free_area_cache = addr;
15142
15143@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
15144 mm->cached_hole_size = vma->vm_start - addr;
15145
15146 /* try just below the current vma->vm_start */
15147- addr = vma->vm_start-len;
15148- } while (len < vma->vm_start);
15149+ addr = skip_heap_stack_gap(vma, len);
15150+ } while (!IS_ERR_VALUE(addr));
15151
15152 bottomup:
15153 /*
15154@@ -198,13 +206,21 @@ bottomup:
15155 * can happen with large stack limits and large mmap()
15156 * allocations.
15157 */
15158+ mm->mmap_base = TASK_UNMAPPED_BASE;
15159+
15160+#ifdef CONFIG_PAX_RANDMMAP
15161+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15162+ mm->mmap_base += mm->delta_mmap;
15163+#endif
15164+
15165+ mm->free_area_cache = mm->mmap_base;
15166 mm->cached_hole_size = ~0UL;
15167- mm->free_area_cache = TASK_UNMAPPED_BASE;
15168 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15169 /*
15170 * Restore the topdown base:
15171 */
15172- mm->free_area_cache = mm->mmap_base;
15173+ mm->mmap_base = base;
15174+ mm->free_area_cache = base;
15175 mm->cached_hole_size = ~0UL;
15176
15177 return addr;
15178diff -urNp linux-3.0.4/arch/x86/kernel/tboot.c linux-3.0.4/arch/x86/kernel/tboot.c
15179--- linux-3.0.4/arch/x86/kernel/tboot.c 2011-07-21 22:17:23.000000000 -0400
15180+++ linux-3.0.4/arch/x86/kernel/tboot.c 2011-08-23 21:47:55.000000000 -0400
15181@@ -217,7 +217,7 @@ static int tboot_setup_sleep(void)
15182
15183 void tboot_shutdown(u32 shutdown_type)
15184 {
15185- void (*shutdown)(void);
15186+ void (* __noreturn shutdown)(void);
15187
15188 if (!tboot_enabled())
15189 return;
15190@@ -239,7 +239,7 @@ void tboot_shutdown(u32 shutdown_type)
15191
15192 switch_to_tboot_pt();
15193
15194- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
15195+ shutdown = (void *)tboot->shutdown_entry;
15196 shutdown();
15197
15198 /* should not reach here */
15199@@ -296,7 +296,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
15200 tboot_shutdown(acpi_shutdown_map[sleep_state]);
15201 }
15202
15203-static atomic_t ap_wfs_count;
15204+static atomic_unchecked_t ap_wfs_count;
15205
15206 static int tboot_wait_for_aps(int num_aps)
15207 {
15208@@ -320,9 +320,9 @@ static int __cpuinit tboot_cpu_callback(
15209 {
15210 switch (action) {
15211 case CPU_DYING:
15212- atomic_inc(&ap_wfs_count);
15213+ atomic_inc_unchecked(&ap_wfs_count);
15214 if (num_online_cpus() == 1)
15215- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
15216+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
15217 return NOTIFY_BAD;
15218 break;
15219 }
15220@@ -341,7 +341,7 @@ static __init int tboot_late_init(void)
15221
15222 tboot_create_trampoline();
15223
15224- atomic_set(&ap_wfs_count, 0);
15225+ atomic_set_unchecked(&ap_wfs_count, 0);
15226 register_hotcpu_notifier(&tboot_cpu_notifier);
15227 return 0;
15228 }
15229diff -urNp linux-3.0.4/arch/x86/kernel/time.c linux-3.0.4/arch/x86/kernel/time.c
15230--- linux-3.0.4/arch/x86/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
15231+++ linux-3.0.4/arch/x86/kernel/time.c 2011-08-23 21:47:55.000000000 -0400
15232@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs
15233 {
15234 unsigned long pc = instruction_pointer(regs);
15235
15236- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
15237+ if (!user_mode(regs) && in_lock_functions(pc)) {
15238 #ifdef CONFIG_FRAME_POINTER
15239- return *(unsigned long *)(regs->bp + sizeof(long));
15240+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
15241 #else
15242 unsigned long *sp =
15243 (unsigned long *)kernel_stack_pointer(regs);
15244@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
15245 * or above a saved flags. Eflags has bits 22-31 zero,
15246 * kernel addresses don't.
15247 */
15248+
15249+#ifdef CONFIG_PAX_KERNEXEC
15250+ return ktla_ktva(sp[0]);
15251+#else
15252 if (sp[0] >> 22)
15253 return sp[0];
15254 if (sp[1] >> 22)
15255 return sp[1];
15256 #endif
15257+
15258+#endif
15259 }
15260 return pc;
15261 }
15262diff -urNp linux-3.0.4/arch/x86/kernel/tls.c linux-3.0.4/arch/x86/kernel/tls.c
15263--- linux-3.0.4/arch/x86/kernel/tls.c 2011-07-21 22:17:23.000000000 -0400
15264+++ linux-3.0.4/arch/x86/kernel/tls.c 2011-08-23 21:47:55.000000000 -0400
15265@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
15266 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
15267 return -EINVAL;
15268
15269+#ifdef CONFIG_PAX_SEGMEXEC
15270+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
15271+ return -EINVAL;
15272+#endif
15273+
15274 set_tls_desc(p, idx, &info, 1);
15275
15276 return 0;
15277diff -urNp linux-3.0.4/arch/x86/kernel/trampoline_32.S linux-3.0.4/arch/x86/kernel/trampoline_32.S
15278--- linux-3.0.4/arch/x86/kernel/trampoline_32.S 2011-07-21 22:17:23.000000000 -0400
15279+++ linux-3.0.4/arch/x86/kernel/trampoline_32.S 2011-08-23 21:47:55.000000000 -0400
15280@@ -32,6 +32,12 @@
15281 #include <asm/segment.h>
15282 #include <asm/page_types.h>
15283
15284+#ifdef CONFIG_PAX_KERNEXEC
15285+#define ta(X) (X)
15286+#else
15287+#define ta(X) ((X) - __PAGE_OFFSET)
15288+#endif
15289+
15290 #ifdef CONFIG_SMP
15291
15292 .section ".x86_trampoline","a"
15293@@ -62,7 +68,7 @@ r_base = .
15294 inc %ax # protected mode (PE) bit
15295 lmsw %ax # into protected mode
15296 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
15297- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
15298+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
15299
15300 # These need to be in the same 64K segment as the above;
15301 # hence we don't use the boot_gdt_descr defined in head.S
15302diff -urNp linux-3.0.4/arch/x86/kernel/trampoline_64.S linux-3.0.4/arch/x86/kernel/trampoline_64.S
15303--- linux-3.0.4/arch/x86/kernel/trampoline_64.S 2011-07-21 22:17:23.000000000 -0400
15304+++ linux-3.0.4/arch/x86/kernel/trampoline_64.S 2011-08-23 21:47:55.000000000 -0400
15305@@ -90,7 +90,7 @@ startup_32:
15306 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
15307 movl %eax, %ds
15308
15309- movl $X86_CR4_PAE, %eax
15310+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15311 movl %eax, %cr4 # Enable PAE mode
15312
15313 # Setup trampoline 4 level pagetables
15314@@ -138,7 +138,7 @@ tidt:
15315 # so the kernel can live anywhere
15316 .balign 4
15317 tgdt:
15318- .short tgdt_end - tgdt # gdt limit
15319+ .short tgdt_end - tgdt - 1 # gdt limit
15320 .long tgdt - r_base
15321 .short 0
15322 .quad 0x00cf9b000000ffff # __KERNEL32_CS
15323diff -urNp linux-3.0.4/arch/x86/kernel/traps.c linux-3.0.4/arch/x86/kernel/traps.c
15324--- linux-3.0.4/arch/x86/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
15325+++ linux-3.0.4/arch/x86/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
15326@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
15327
15328 /* Do we ignore FPU interrupts ? */
15329 char ignore_fpu_irq;
15330-
15331-/*
15332- * The IDT has to be page-aligned to simplify the Pentium
15333- * F0 0F bug workaround.
15334- */
15335-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
15336 #endif
15337
15338 DECLARE_BITMAP(used_vectors, NR_VECTORS);
15339@@ -117,13 +111,13 @@ static inline void preempt_conditional_c
15340 }
15341
15342 static void __kprobes
15343-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
15344+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
15345 long error_code, siginfo_t *info)
15346 {
15347 struct task_struct *tsk = current;
15348
15349 #ifdef CONFIG_X86_32
15350- if (regs->flags & X86_VM_MASK) {
15351+ if (v8086_mode(regs)) {
15352 /*
15353 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
15354 * On nmi (interrupt 2), do_trap should not be called.
15355@@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str
15356 }
15357 #endif
15358
15359- if (!user_mode(regs))
15360+ if (!user_mode_novm(regs))
15361 goto kernel_trap;
15362
15363 #ifdef CONFIG_X86_32
15364@@ -157,7 +151,7 @@ trap_signal:
15365 printk_ratelimit()) {
15366 printk(KERN_INFO
15367 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
15368- tsk->comm, tsk->pid, str,
15369+ tsk->comm, task_pid_nr(tsk), str,
15370 regs->ip, regs->sp, error_code);
15371 print_vma_addr(" in ", regs->ip);
15372 printk("\n");
15373@@ -174,8 +168,20 @@ kernel_trap:
15374 if (!fixup_exception(regs)) {
15375 tsk->thread.error_code = error_code;
15376 tsk->thread.trap_no = trapnr;
15377+
15378+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15379+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
15380+ str = "PAX: suspicious stack segment fault";
15381+#endif
15382+
15383 die(str, regs, error_code);
15384 }
15385+
15386+#ifdef CONFIG_PAX_REFCOUNT
15387+ if (trapnr == 4)
15388+ pax_report_refcount_overflow(regs);
15389+#endif
15390+
15391 return;
15392
15393 #ifdef CONFIG_X86_32
15394@@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *re
15395 conditional_sti(regs);
15396
15397 #ifdef CONFIG_X86_32
15398- if (regs->flags & X86_VM_MASK)
15399+ if (v8086_mode(regs))
15400 goto gp_in_vm86;
15401 #endif
15402
15403 tsk = current;
15404- if (!user_mode(regs))
15405+ if (!user_mode_novm(regs))
15406 goto gp_in_kernel;
15407
15408+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
15409+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
15410+ struct mm_struct *mm = tsk->mm;
15411+ unsigned long limit;
15412+
15413+ down_write(&mm->mmap_sem);
15414+ limit = mm->context.user_cs_limit;
15415+ if (limit < TASK_SIZE) {
15416+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
15417+ up_write(&mm->mmap_sem);
15418+ return;
15419+ }
15420+ up_write(&mm->mmap_sem);
15421+ }
15422+#endif
15423+
15424 tsk->thread.error_code = error_code;
15425 tsk->thread.trap_no = 13;
15426
15427@@ -304,6 +326,13 @@ gp_in_kernel:
15428 if (notify_die(DIE_GPF, "general protection fault", regs,
15429 error_code, 13, SIGSEGV) == NOTIFY_STOP)
15430 return;
15431+
15432+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15433+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
15434+ die("PAX: suspicious general protection fault", regs, error_code);
15435+ else
15436+#endif
15437+
15438 die("general protection fault", regs, error_code);
15439 }
15440
15441@@ -433,6 +462,17 @@ static notrace __kprobes void default_do
15442 dotraplinkage notrace __kprobes void
15443 do_nmi(struct pt_regs *regs, long error_code)
15444 {
15445+
15446+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15447+ if (!user_mode(regs)) {
15448+ unsigned long cs = regs->cs & 0xFFFF;
15449+ unsigned long ip = ktva_ktla(regs->ip);
15450+
15451+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
15452+ regs->ip = ip;
15453+ }
15454+#endif
15455+
15456 nmi_enter();
15457
15458 inc_irq_stat(__nmi_count);
15459@@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(st
15460 /* It's safe to allow irq's after DR6 has been saved */
15461 preempt_conditional_sti(regs);
15462
15463- if (regs->flags & X86_VM_MASK) {
15464+ if (v8086_mode(regs)) {
15465 handle_vm86_trap((struct kernel_vm86_regs *) regs,
15466 error_code, 1);
15467 preempt_conditional_cli(regs);
15468@@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(st
15469 * We already checked v86 mode above, so we can check for kernel mode
15470 * by just checking the CPL of CS.
15471 */
15472- if ((dr6 & DR_STEP) && !user_mode(regs)) {
15473+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
15474 tsk->thread.debugreg6 &= ~DR_STEP;
15475 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
15476 regs->flags &= ~X86_EFLAGS_TF;
15477@@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, in
15478 return;
15479 conditional_sti(regs);
15480
15481- if (!user_mode_vm(regs))
15482+ if (!user_mode(regs))
15483 {
15484 if (!fixup_exception(regs)) {
15485 task->thread.error_code = error_code;
15486@@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) sm
15487 void __math_state_restore(void)
15488 {
15489 struct thread_info *thread = current_thread_info();
15490- struct task_struct *tsk = thread->task;
15491+ struct task_struct *tsk = current;
15492
15493 /*
15494 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
15495@@ -750,8 +790,7 @@ void __math_state_restore(void)
15496 */
15497 asmlinkage void math_state_restore(void)
15498 {
15499- struct thread_info *thread = current_thread_info();
15500- struct task_struct *tsk = thread->task;
15501+ struct task_struct *tsk = current;
15502
15503 if (!tsk_used_math(tsk)) {
15504 local_irq_enable();
15505diff -urNp linux-3.0.4/arch/x86/kernel/verify_cpu.S linux-3.0.4/arch/x86/kernel/verify_cpu.S
15506--- linux-3.0.4/arch/x86/kernel/verify_cpu.S 2011-07-21 22:17:23.000000000 -0400
15507+++ linux-3.0.4/arch/x86/kernel/verify_cpu.S 2011-08-23 21:48:14.000000000 -0400
15508@@ -20,6 +20,7 @@
15509 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
15510 * arch/x86/kernel/trampoline_64.S: secondary processor verification
15511 * arch/x86/kernel/head_32.S: processor startup
15512+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
15513 *
15514 * verify_cpu, returns the status of longmode and SSE in register %eax.
15515 * 0: Success 1: Failure
15516diff -urNp linux-3.0.4/arch/x86/kernel/vm86_32.c linux-3.0.4/arch/x86/kernel/vm86_32.c
15517--- linux-3.0.4/arch/x86/kernel/vm86_32.c 2011-07-21 22:17:23.000000000 -0400
15518+++ linux-3.0.4/arch/x86/kernel/vm86_32.c 2011-08-23 21:48:14.000000000 -0400
15519@@ -41,6 +41,7 @@
15520 #include <linux/ptrace.h>
15521 #include <linux/audit.h>
15522 #include <linux/stddef.h>
15523+#include <linux/grsecurity.h>
15524
15525 #include <asm/uaccess.h>
15526 #include <asm/io.h>
15527@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
15528 do_exit(SIGSEGV);
15529 }
15530
15531- tss = &per_cpu(init_tss, get_cpu());
15532+ tss = init_tss + get_cpu();
15533 current->thread.sp0 = current->thread.saved_sp0;
15534 current->thread.sysenter_cs = __KERNEL_CS;
15535 load_sp0(tss, &current->thread);
15536@@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __use
15537 struct task_struct *tsk;
15538 int tmp, ret = -EPERM;
15539
15540+#ifdef CONFIG_GRKERNSEC_VM86
15541+ if (!capable(CAP_SYS_RAWIO)) {
15542+ gr_handle_vm86();
15543+ goto out;
15544+ }
15545+#endif
15546+
15547 tsk = current;
15548 if (tsk->thread.saved_sp0)
15549 goto out;
15550@@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned
15551 int tmp, ret;
15552 struct vm86plus_struct __user *v86;
15553
15554+#ifdef CONFIG_GRKERNSEC_VM86
15555+ if (!capable(CAP_SYS_RAWIO)) {
15556+ gr_handle_vm86();
15557+ ret = -EPERM;
15558+ goto out;
15559+ }
15560+#endif
15561+
15562 tsk = current;
15563 switch (cmd) {
15564 case VM86_REQUEST_IRQ:
15565@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
15566 tsk->thread.saved_fs = info->regs32->fs;
15567 tsk->thread.saved_gs = get_user_gs(info->regs32);
15568
15569- tss = &per_cpu(init_tss, get_cpu());
15570+ tss = init_tss + get_cpu();
15571 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
15572 if (cpu_has_sep)
15573 tsk->thread.sysenter_cs = 0;
15574@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
15575 goto cannot_handle;
15576 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
15577 goto cannot_handle;
15578- intr_ptr = (unsigned long __user *) (i << 2);
15579+ intr_ptr = (__force unsigned long __user *) (i << 2);
15580 if (get_user(segoffs, intr_ptr))
15581 goto cannot_handle;
15582 if ((segoffs >> 16) == BIOSSEG)
15583diff -urNp linux-3.0.4/arch/x86/kernel/vmlinux.lds.S linux-3.0.4/arch/x86/kernel/vmlinux.lds.S
15584--- linux-3.0.4/arch/x86/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
15585+++ linux-3.0.4/arch/x86/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
15586@@ -26,6 +26,13 @@
15587 #include <asm/page_types.h>
15588 #include <asm/cache.h>
15589 #include <asm/boot.h>
15590+#include <asm/segment.h>
15591+
15592+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15593+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
15594+#else
15595+#define __KERNEL_TEXT_OFFSET 0
15596+#endif
15597
15598 #undef i386 /* in case the preprocessor is a 32bit one */
15599
15600@@ -69,31 +76,46 @@ jiffies_64 = jiffies;
15601
15602 PHDRS {
15603 text PT_LOAD FLAGS(5); /* R_E */
15604+#ifdef CONFIG_X86_32
15605+ module PT_LOAD FLAGS(5); /* R_E */
15606+#endif
15607+#ifdef CONFIG_XEN
15608+ rodata PT_LOAD FLAGS(5); /* R_E */
15609+#else
15610+ rodata PT_LOAD FLAGS(4); /* R__ */
15611+#endif
15612 data PT_LOAD FLAGS(6); /* RW_ */
15613 #ifdef CONFIG_X86_64
15614 user PT_LOAD FLAGS(5); /* R_E */
15615+#endif
15616+ init.begin PT_LOAD FLAGS(6); /* RW_ */
15617 #ifdef CONFIG_SMP
15618 percpu PT_LOAD FLAGS(6); /* RW_ */
15619 #endif
15620+ text.init PT_LOAD FLAGS(5); /* R_E */
15621+ text.exit PT_LOAD FLAGS(5); /* R_E */
15622 init PT_LOAD FLAGS(7); /* RWE */
15623-#endif
15624 note PT_NOTE FLAGS(0); /* ___ */
15625 }
15626
15627 SECTIONS
15628 {
15629 #ifdef CONFIG_X86_32
15630- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
15631- phys_startup_32 = startup_32 - LOAD_OFFSET;
15632+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
15633 #else
15634- . = __START_KERNEL;
15635- phys_startup_64 = startup_64 - LOAD_OFFSET;
15636+ . = __START_KERNEL;
15637 #endif
15638
15639 /* Text and read-only data */
15640- .text : AT(ADDR(.text) - LOAD_OFFSET) {
15641- _text = .;
15642+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15643 /* bootstrapping code */
15644+#ifdef CONFIG_X86_32
15645+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15646+#else
15647+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15648+#endif
15649+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15650+ _text = .;
15651 HEAD_TEXT
15652 #ifdef CONFIG_X86_32
15653 . = ALIGN(PAGE_SIZE);
15654@@ -109,13 +131,47 @@ SECTIONS
15655 IRQENTRY_TEXT
15656 *(.fixup)
15657 *(.gnu.warning)
15658- /* End of text section */
15659- _etext = .;
15660 } :text = 0x9090
15661
15662- NOTES :text :note
15663+ . += __KERNEL_TEXT_OFFSET;
15664+
15665+#ifdef CONFIG_X86_32
15666+ . = ALIGN(PAGE_SIZE);
15667+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
15668+
15669+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
15670+ MODULES_EXEC_VADDR = .;
15671+ BYTE(0)
15672+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
15673+ . = ALIGN(HPAGE_SIZE);
15674+ MODULES_EXEC_END = . - 1;
15675+#endif
15676+
15677+ } :module
15678+#endif
15679+
15680+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
15681+ /* End of text section */
15682+ _etext = . - __KERNEL_TEXT_OFFSET;
15683+ }
15684+
15685+#ifdef CONFIG_X86_32
15686+ . = ALIGN(PAGE_SIZE);
15687+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
15688+ *(.idt)
15689+ . = ALIGN(PAGE_SIZE);
15690+ *(.empty_zero_page)
15691+ *(.initial_pg_fixmap)
15692+ *(.initial_pg_pmd)
15693+ *(.initial_page_table)
15694+ *(.swapper_pg_dir)
15695+ } :rodata
15696+#endif
15697+
15698+ . = ALIGN(PAGE_SIZE);
15699+ NOTES :rodata :note
15700
15701- EXCEPTION_TABLE(16) :text = 0x9090
15702+ EXCEPTION_TABLE(16) :rodata
15703
15704 #if defined(CONFIG_DEBUG_RODATA)
15705 /* .text should occupy whole number of pages */
15706@@ -127,16 +183,20 @@ SECTIONS
15707
15708 /* Data */
15709 .data : AT(ADDR(.data) - LOAD_OFFSET) {
15710+
15711+#ifdef CONFIG_PAX_KERNEXEC
15712+ . = ALIGN(HPAGE_SIZE);
15713+#else
15714+ . = ALIGN(PAGE_SIZE);
15715+#endif
15716+
15717 /* Start of data section */
15718 _sdata = .;
15719
15720 /* init_task */
15721 INIT_TASK_DATA(THREAD_SIZE)
15722
15723-#ifdef CONFIG_X86_32
15724- /* 32 bit has nosave before _edata */
15725 NOSAVE_DATA
15726-#endif
15727
15728 PAGE_ALIGNED_DATA(PAGE_SIZE)
15729
15730@@ -208,12 +268,19 @@ SECTIONS
15731 #endif /* CONFIG_X86_64 */
15732
15733 /* Init code and data - will be freed after init */
15734- . = ALIGN(PAGE_SIZE);
15735 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
15736+ BYTE(0)
15737+
15738+#ifdef CONFIG_PAX_KERNEXEC
15739+ . = ALIGN(HPAGE_SIZE);
15740+#else
15741+ . = ALIGN(PAGE_SIZE);
15742+#endif
15743+
15744 __init_begin = .; /* paired with __init_end */
15745- }
15746+ } :init.begin
15747
15748-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
15749+#ifdef CONFIG_SMP
15750 /*
15751 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
15752 * output PHDR, so the next output section - .init.text - should
15753@@ -222,12 +289,27 @@ SECTIONS
15754 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
15755 #endif
15756
15757- INIT_TEXT_SECTION(PAGE_SIZE)
15758-#ifdef CONFIG_X86_64
15759- :init
15760-#endif
15761+ . = ALIGN(PAGE_SIZE);
15762+ init_begin = .;
15763+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
15764+ VMLINUX_SYMBOL(_sinittext) = .;
15765+ INIT_TEXT
15766+ VMLINUX_SYMBOL(_einittext) = .;
15767+ . = ALIGN(PAGE_SIZE);
15768+ } :text.init
15769
15770- INIT_DATA_SECTION(16)
15771+ /*
15772+ * .exit.text is discard at runtime, not link time, to deal with
15773+ * references from .altinstructions and .eh_frame
15774+ */
15775+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15776+ EXIT_TEXT
15777+ . = ALIGN(16);
15778+ } :text.exit
15779+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
15780+
15781+ . = ALIGN(PAGE_SIZE);
15782+ INIT_DATA_SECTION(16) :init
15783
15784 /*
15785 * Code and data for a variety of lowlevel trampolines, to be
15786@@ -301,19 +383,12 @@ SECTIONS
15787 }
15788
15789 . = ALIGN(8);
15790- /*
15791- * .exit.text is discard at runtime, not link time, to deal with
15792- * references from .altinstructions and .eh_frame
15793- */
15794- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
15795- EXIT_TEXT
15796- }
15797
15798 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
15799 EXIT_DATA
15800 }
15801
15802-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
15803+#ifndef CONFIG_SMP
15804 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
15805 #endif
15806
15807@@ -332,16 +407,10 @@ SECTIONS
15808 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
15809 __smp_locks = .;
15810 *(.smp_locks)
15811- . = ALIGN(PAGE_SIZE);
15812 __smp_locks_end = .;
15813+ . = ALIGN(PAGE_SIZE);
15814 }
15815
15816-#ifdef CONFIG_X86_64
15817- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
15818- NOSAVE_DATA
15819- }
15820-#endif
15821-
15822 /* BSS */
15823 . = ALIGN(PAGE_SIZE);
15824 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
15825@@ -357,6 +426,7 @@ SECTIONS
15826 __brk_base = .;
15827 . += 64 * 1024; /* 64k alignment slop space */
15828 *(.brk_reservation) /* areas brk users have reserved */
15829+ . = ALIGN(HPAGE_SIZE);
15830 __brk_limit = .;
15831 }
15832
15833@@ -383,13 +453,12 @@ SECTIONS
15834 * for the boot processor.
15835 */
15836 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
15837-INIT_PER_CPU(gdt_page);
15838 INIT_PER_CPU(irq_stack_union);
15839
15840 /*
15841 * Build-time check on the image size:
15842 */
15843-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
15844+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
15845 "kernel image bigger than KERNEL_IMAGE_SIZE");
15846
15847 #ifdef CONFIG_SMP
15848diff -urNp linux-3.0.4/arch/x86/kernel/vsyscall_64.c linux-3.0.4/arch/x86/kernel/vsyscall_64.c
15849--- linux-3.0.4/arch/x86/kernel/vsyscall_64.c 2011-07-21 22:17:23.000000000 -0400
15850+++ linux-3.0.4/arch/x86/kernel/vsyscall_64.c 2011-08-23 21:47:55.000000000 -0400
15851@@ -53,7 +53,7 @@ DEFINE_VVAR(int, vgetcpu_mode);
15852 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
15853 {
15854 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
15855- .sysctl_enabled = 1,
15856+ .sysctl_enabled = 0,
15857 };
15858
15859 void update_vsyscall_tz(void)
15860@@ -231,7 +231,7 @@ static long __vsyscall(3) venosys_1(void
15861 static ctl_table kernel_table2[] = {
15862 { .procname = "vsyscall64",
15863 .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
15864- .mode = 0644,
15865+ .mode = 0444,
15866 .proc_handler = proc_dointvec },
15867 {}
15868 };
15869diff -urNp linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c
15870--- linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c 2011-07-21 22:17:23.000000000 -0400
15871+++ linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c 2011-08-23 21:47:55.000000000 -0400
15872@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
15873 EXPORT_SYMBOL(copy_user_generic_string);
15874 EXPORT_SYMBOL(copy_user_generic_unrolled);
15875 EXPORT_SYMBOL(__copy_user_nocache);
15876-EXPORT_SYMBOL(_copy_from_user);
15877-EXPORT_SYMBOL(_copy_to_user);
15878
15879 EXPORT_SYMBOL(copy_page);
15880 EXPORT_SYMBOL(clear_page);
15881diff -urNp linux-3.0.4/arch/x86/kernel/xsave.c linux-3.0.4/arch/x86/kernel/xsave.c
15882--- linux-3.0.4/arch/x86/kernel/xsave.c 2011-07-21 22:17:23.000000000 -0400
15883+++ linux-3.0.4/arch/x86/kernel/xsave.c 2011-08-23 21:47:55.000000000 -0400
15884@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_
15885 fx_sw_user->xstate_size > fx_sw_user->extended_size)
15886 return -EINVAL;
15887
15888- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
15889+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
15890 fx_sw_user->extended_size -
15891 FP_XSTATE_MAGIC2_SIZE));
15892 if (err)
15893@@ -267,7 +267,7 @@ fx_only:
15894 * the other extended state.
15895 */
15896 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
15897- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
15898+ return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
15899 }
15900
15901 /*
15902@@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf
15903 if (use_xsave())
15904 err = restore_user_xstate(buf);
15905 else
15906- err = fxrstor_checking((__force struct i387_fxsave_struct *)
15907+ err = fxrstor_checking((struct i387_fxsave_struct __user *)
15908 buf);
15909 if (unlikely(err)) {
15910 /*
15911diff -urNp linux-3.0.4/arch/x86/kvm/emulate.c linux-3.0.4/arch/x86/kvm/emulate.c
15912--- linux-3.0.4/arch/x86/kvm/emulate.c 2011-07-21 22:17:23.000000000 -0400
15913+++ linux-3.0.4/arch/x86/kvm/emulate.c 2011-08-23 21:47:55.000000000 -0400
15914@@ -96,7 +96,7 @@
15915 #define Src2ImmByte (2<<29)
15916 #define Src2One (3<<29)
15917 #define Src2Imm (4<<29)
15918-#define Src2Mask (7<<29)
15919+#define Src2Mask (7U<<29)
15920
15921 #define X2(x...) x, x
15922 #define X3(x...) X2(x), x
15923@@ -207,6 +207,7 @@ struct gprefix {
15924
15925 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
15926 do { \
15927+ unsigned long _tmp; \
15928 __asm__ __volatile__ ( \
15929 _PRE_EFLAGS("0", "4", "2") \
15930 _op _suffix " %"_x"3,%1; " \
15931@@ -220,8 +221,6 @@ struct gprefix {
15932 /* Raw emulation: instruction has two explicit operands. */
15933 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
15934 do { \
15935- unsigned long _tmp; \
15936- \
15937 switch ((_dst).bytes) { \
15938 case 2: \
15939 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
15940@@ -237,7 +236,6 @@ struct gprefix {
15941
15942 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
15943 do { \
15944- unsigned long _tmp; \
15945 switch ((_dst).bytes) { \
15946 case 1: \
15947 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
15948diff -urNp linux-3.0.4/arch/x86/kvm/lapic.c linux-3.0.4/arch/x86/kvm/lapic.c
15949--- linux-3.0.4/arch/x86/kvm/lapic.c 2011-07-21 22:17:23.000000000 -0400
15950+++ linux-3.0.4/arch/x86/kvm/lapic.c 2011-08-23 21:47:55.000000000 -0400
15951@@ -53,7 +53,7 @@
15952 #define APIC_BUS_CYCLE_NS 1
15953
15954 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
15955-#define apic_debug(fmt, arg...)
15956+#define apic_debug(fmt, arg...) do {} while (0)
15957
15958 #define APIC_LVT_NUM 6
15959 /* 14 is the version for Xeon and Pentium 8.4.8*/
15960diff -urNp linux-3.0.4/arch/x86/kvm/mmu.c linux-3.0.4/arch/x86/kvm/mmu.c
15961--- linux-3.0.4/arch/x86/kvm/mmu.c 2011-07-21 22:17:23.000000000 -0400
15962+++ linux-3.0.4/arch/x86/kvm/mmu.c 2011-08-23 21:47:55.000000000 -0400
15963@@ -3238,7 +3238,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
15964
15965 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
15966
15967- invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
15968+ invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
15969
15970 /*
15971 * Assume that the pte write on a page table of the same type
15972@@ -3270,7 +3270,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
15973 }
15974
15975 spin_lock(&vcpu->kvm->mmu_lock);
15976- if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
15977+ if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
15978 gentry = 0;
15979 kvm_mmu_free_some_pages(vcpu);
15980 ++vcpu->kvm->stat.mmu_pte_write;
15981diff -urNp linux-3.0.4/arch/x86/kvm/paging_tmpl.h linux-3.0.4/arch/x86/kvm/paging_tmpl.h
15982--- linux-3.0.4/arch/x86/kvm/paging_tmpl.h 2011-07-21 22:17:23.000000000 -0400
15983+++ linux-3.0.4/arch/x86/kvm/paging_tmpl.h 2011-08-23 21:48:14.000000000 -0400
15984@@ -583,6 +583,8 @@ static int FNAME(page_fault)(struct kvm_
15985 unsigned long mmu_seq;
15986 bool map_writable;
15987
15988+ pax_track_stack();
15989+
15990 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
15991
15992 r = mmu_topup_memory_caches(vcpu);
15993@@ -703,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcp
15994 if (need_flush)
15995 kvm_flush_remote_tlbs(vcpu->kvm);
15996
15997- atomic_inc(&vcpu->kvm->arch.invlpg_counter);
15998+ atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
15999
16000 spin_unlock(&vcpu->kvm->mmu_lock);
16001
16002diff -urNp linux-3.0.4/arch/x86/kvm/svm.c linux-3.0.4/arch/x86/kvm/svm.c
16003--- linux-3.0.4/arch/x86/kvm/svm.c 2011-07-21 22:17:23.000000000 -0400
16004+++ linux-3.0.4/arch/x86/kvm/svm.c 2011-08-23 21:47:55.000000000 -0400
16005@@ -3377,7 +3377,11 @@ static void reload_tss(struct kvm_vcpu *
16006 int cpu = raw_smp_processor_id();
16007
16008 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
16009+
16010+ pax_open_kernel();
16011 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
16012+ pax_close_kernel();
16013+
16014 load_TR_desc();
16015 }
16016
16017@@ -3755,6 +3759,10 @@ static void svm_vcpu_run(struct kvm_vcpu
16018 #endif
16019 #endif
16020
16021+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16022+ __set_fs(current_thread_info()->addr_limit);
16023+#endif
16024+
16025 reload_tss(vcpu);
16026
16027 local_irq_disable();
16028diff -urNp linux-3.0.4/arch/x86/kvm/vmx.c linux-3.0.4/arch/x86/kvm/vmx.c
16029--- linux-3.0.4/arch/x86/kvm/vmx.c 2011-07-21 22:17:23.000000000 -0400
16030+++ linux-3.0.4/arch/x86/kvm/vmx.c 2011-08-23 21:47:55.000000000 -0400
16031@@ -797,7 +797,11 @@ static void reload_tss(void)
16032 struct desc_struct *descs;
16033
16034 descs = (void *)gdt->address;
16035+
16036+ pax_open_kernel();
16037 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
16038+ pax_close_kernel();
16039+
16040 load_TR_desc();
16041 }
16042
16043@@ -1747,8 +1751,11 @@ static __init int hardware_setup(void)
16044 if (!cpu_has_vmx_flexpriority())
16045 flexpriority_enabled = 0;
16046
16047- if (!cpu_has_vmx_tpr_shadow())
16048- kvm_x86_ops->update_cr8_intercept = NULL;
16049+ if (!cpu_has_vmx_tpr_shadow()) {
16050+ pax_open_kernel();
16051+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
16052+ pax_close_kernel();
16053+ }
16054
16055 if (enable_ept && !cpu_has_vmx_ept_2m_page())
16056 kvm_disable_largepages();
16057@@ -2814,7 +2821,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
16058 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
16059
16060 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
16061- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
16062+ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
16063 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
16064 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
16065 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
16066@@ -4211,6 +4218,12 @@ static void __noclone vmx_vcpu_run(struc
16067 "jmp .Lkvm_vmx_return \n\t"
16068 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
16069 ".Lkvm_vmx_return: "
16070+
16071+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16072+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
16073+ ".Lkvm_vmx_return2: "
16074+#endif
16075+
16076 /* Save guest registers, load host registers, keep flags */
16077 "mov %0, %c[wordsize](%%"R"sp) \n\t"
16078 "pop %0 \n\t"
16079@@ -4259,6 +4272,11 @@ static void __noclone vmx_vcpu_run(struc
16080 #endif
16081 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
16082 [wordsize]"i"(sizeof(ulong))
16083+
16084+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16085+ ,[cs]"i"(__KERNEL_CS)
16086+#endif
16087+
16088 : "cc", "memory"
16089 , R"ax", R"bx", R"di", R"si"
16090 #ifdef CONFIG_X86_64
16091@@ -4276,7 +4294,16 @@ static void __noclone vmx_vcpu_run(struc
16092
16093 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
16094
16095- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
16096+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
16097+
16098+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16099+ loadsegment(fs, __KERNEL_PERCPU);
16100+#endif
16101+
16102+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16103+ __set_fs(current_thread_info()->addr_limit);
16104+#endif
16105+
16106 vmx->launched = 1;
16107
16108 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
16109diff -urNp linux-3.0.4/arch/x86/kvm/x86.c linux-3.0.4/arch/x86/kvm/x86.c
16110--- linux-3.0.4/arch/x86/kvm/x86.c 2011-07-21 22:17:23.000000000 -0400
16111+++ linux-3.0.4/arch/x86/kvm/x86.c 2011-08-23 21:47:55.000000000 -0400
16112@@ -2057,6 +2057,8 @@ long kvm_arch_dev_ioctl(struct file *fil
16113 if (n < msr_list.nmsrs)
16114 goto out;
16115 r = -EFAULT;
16116+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
16117+ goto out;
16118 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
16119 num_msrs_to_save * sizeof(u32)))
16120 goto out;
16121@@ -2229,15 +2231,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
16122 struct kvm_cpuid2 *cpuid,
16123 struct kvm_cpuid_entry2 __user *entries)
16124 {
16125- int r;
16126+ int r, i;
16127
16128 r = -E2BIG;
16129 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
16130 goto out;
16131 r = -EFAULT;
16132- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
16133- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16134+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16135 goto out;
16136+ for (i = 0; i < cpuid->nent; ++i) {
16137+ struct kvm_cpuid_entry2 cpuid_entry;
16138+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
16139+ goto out;
16140+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
16141+ }
16142 vcpu->arch.cpuid_nent = cpuid->nent;
16143 kvm_apic_set_version(vcpu);
16144 kvm_x86_ops->cpuid_update(vcpu);
16145@@ -2252,15 +2259,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
16146 struct kvm_cpuid2 *cpuid,
16147 struct kvm_cpuid_entry2 __user *entries)
16148 {
16149- int r;
16150+ int r, i;
16151
16152 r = -E2BIG;
16153 if (cpuid->nent < vcpu->arch.cpuid_nent)
16154 goto out;
16155 r = -EFAULT;
16156- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
16157- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16158+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16159 goto out;
16160+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
16161+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
16162+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
16163+ goto out;
16164+ }
16165 return 0;
16166
16167 out:
16168@@ -2579,7 +2590,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
16169 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
16170 struct kvm_interrupt *irq)
16171 {
16172- if (irq->irq < 0 || irq->irq >= 256)
16173+ if (irq->irq >= 256)
16174 return -EINVAL;
16175 if (irqchip_in_kernel(vcpu->kvm))
16176 return -ENXIO;
16177@@ -4878,7 +4889,7 @@ void kvm_after_handle_nmi(struct kvm_vcp
16178 }
16179 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
16180
16181-int kvm_arch_init(void *opaque)
16182+int kvm_arch_init(const void *opaque)
16183 {
16184 int r;
16185 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
16186diff -urNp linux-3.0.4/arch/x86/lguest/boot.c linux-3.0.4/arch/x86/lguest/boot.c
16187--- linux-3.0.4/arch/x86/lguest/boot.c 2011-07-21 22:17:23.000000000 -0400
16188+++ linux-3.0.4/arch/x86/lguest/boot.c 2011-08-23 21:47:55.000000000 -0400
16189@@ -1176,9 +1176,10 @@ static __init int early_put_chars(u32 vt
16190 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
16191 * Launcher to reboot us.
16192 */
16193-static void lguest_restart(char *reason)
16194+static __noreturn void lguest_restart(char *reason)
16195 {
16196 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
16197+ BUG();
16198 }
16199
16200 /*G:050
16201diff -urNp linux-3.0.4/arch/x86/lib/atomic64_32.c linux-3.0.4/arch/x86/lib/atomic64_32.c
16202--- linux-3.0.4/arch/x86/lib/atomic64_32.c 2011-07-21 22:17:23.000000000 -0400
16203+++ linux-3.0.4/arch/x86/lib/atomic64_32.c 2011-08-23 21:47:55.000000000 -0400
16204@@ -8,18 +8,30 @@
16205
16206 long long atomic64_read_cx8(long long, const atomic64_t *v);
16207 EXPORT_SYMBOL(atomic64_read_cx8);
16208+long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16209+EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
16210 long long atomic64_set_cx8(long long, const atomic64_t *v);
16211 EXPORT_SYMBOL(atomic64_set_cx8);
16212+long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16213+EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
16214 long long atomic64_xchg_cx8(long long, unsigned high);
16215 EXPORT_SYMBOL(atomic64_xchg_cx8);
16216 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
16217 EXPORT_SYMBOL(atomic64_add_return_cx8);
16218+long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16219+EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
16220 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
16221 EXPORT_SYMBOL(atomic64_sub_return_cx8);
16222+long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16223+EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
16224 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
16225 EXPORT_SYMBOL(atomic64_inc_return_cx8);
16226+long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16227+EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
16228 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
16229 EXPORT_SYMBOL(atomic64_dec_return_cx8);
16230+long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16231+EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
16232 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
16233 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
16234 int atomic64_inc_not_zero_cx8(atomic64_t *v);
16235@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
16236 #ifndef CONFIG_X86_CMPXCHG64
16237 long long atomic64_read_386(long long, const atomic64_t *v);
16238 EXPORT_SYMBOL(atomic64_read_386);
16239+long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
16240+EXPORT_SYMBOL(atomic64_read_unchecked_386);
16241 long long atomic64_set_386(long long, const atomic64_t *v);
16242 EXPORT_SYMBOL(atomic64_set_386);
16243+long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
16244+EXPORT_SYMBOL(atomic64_set_unchecked_386);
16245 long long atomic64_xchg_386(long long, unsigned high);
16246 EXPORT_SYMBOL(atomic64_xchg_386);
16247 long long atomic64_add_return_386(long long a, atomic64_t *v);
16248 EXPORT_SYMBOL(atomic64_add_return_386);
16249+long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16250+EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
16251 long long atomic64_sub_return_386(long long a, atomic64_t *v);
16252 EXPORT_SYMBOL(atomic64_sub_return_386);
16253+long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16254+EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
16255 long long atomic64_inc_return_386(long long a, atomic64_t *v);
16256 EXPORT_SYMBOL(atomic64_inc_return_386);
16257+long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16258+EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
16259 long long atomic64_dec_return_386(long long a, atomic64_t *v);
16260 EXPORT_SYMBOL(atomic64_dec_return_386);
16261+long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16262+EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
16263 long long atomic64_add_386(long long a, atomic64_t *v);
16264 EXPORT_SYMBOL(atomic64_add_386);
16265+long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
16266+EXPORT_SYMBOL(atomic64_add_unchecked_386);
16267 long long atomic64_sub_386(long long a, atomic64_t *v);
16268 EXPORT_SYMBOL(atomic64_sub_386);
16269+long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
16270+EXPORT_SYMBOL(atomic64_sub_unchecked_386);
16271 long long atomic64_inc_386(long long a, atomic64_t *v);
16272 EXPORT_SYMBOL(atomic64_inc_386);
16273+long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
16274+EXPORT_SYMBOL(atomic64_inc_unchecked_386);
16275 long long atomic64_dec_386(long long a, atomic64_t *v);
16276 EXPORT_SYMBOL(atomic64_dec_386);
16277+long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
16278+EXPORT_SYMBOL(atomic64_dec_unchecked_386);
16279 long long atomic64_dec_if_positive_386(atomic64_t *v);
16280 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
16281 int atomic64_inc_not_zero_386(atomic64_t *v);
16282diff -urNp linux-3.0.4/arch/x86/lib/atomic64_386_32.S linux-3.0.4/arch/x86/lib/atomic64_386_32.S
16283--- linux-3.0.4/arch/x86/lib/atomic64_386_32.S 2011-07-21 22:17:23.000000000 -0400
16284+++ linux-3.0.4/arch/x86/lib/atomic64_386_32.S 2011-08-23 21:47:55.000000000 -0400
16285@@ -48,6 +48,10 @@ BEGIN(read)
16286 movl (v), %eax
16287 movl 4(v), %edx
16288 RET_ENDP
16289+BEGIN(read_unchecked)
16290+ movl (v), %eax
16291+ movl 4(v), %edx
16292+RET_ENDP
16293 #undef v
16294
16295 #define v %esi
16296@@ -55,6 +59,10 @@ BEGIN(set)
16297 movl %ebx, (v)
16298 movl %ecx, 4(v)
16299 RET_ENDP
16300+BEGIN(set_unchecked)
16301+ movl %ebx, (v)
16302+ movl %ecx, 4(v)
16303+RET_ENDP
16304 #undef v
16305
16306 #define v %esi
16307@@ -70,6 +78,20 @@ RET_ENDP
16308 BEGIN(add)
16309 addl %eax, (v)
16310 adcl %edx, 4(v)
16311+
16312+#ifdef CONFIG_PAX_REFCOUNT
16313+ jno 0f
16314+ subl %eax, (v)
16315+ sbbl %edx, 4(v)
16316+ int $4
16317+0:
16318+ _ASM_EXTABLE(0b, 0b)
16319+#endif
16320+
16321+RET_ENDP
16322+BEGIN(add_unchecked)
16323+ addl %eax, (v)
16324+ adcl %edx, 4(v)
16325 RET_ENDP
16326 #undef v
16327
16328@@ -77,6 +99,24 @@ RET_ENDP
16329 BEGIN(add_return)
16330 addl (v), %eax
16331 adcl 4(v), %edx
16332+
16333+#ifdef CONFIG_PAX_REFCOUNT
16334+ into
16335+1234:
16336+ _ASM_EXTABLE(1234b, 2f)
16337+#endif
16338+
16339+ movl %eax, (v)
16340+ movl %edx, 4(v)
16341+
16342+#ifdef CONFIG_PAX_REFCOUNT
16343+2:
16344+#endif
16345+
16346+RET_ENDP
16347+BEGIN(add_return_unchecked)
16348+ addl (v), %eax
16349+ adcl 4(v), %edx
16350 movl %eax, (v)
16351 movl %edx, 4(v)
16352 RET_ENDP
16353@@ -86,6 +126,20 @@ RET_ENDP
16354 BEGIN(sub)
16355 subl %eax, (v)
16356 sbbl %edx, 4(v)
16357+
16358+#ifdef CONFIG_PAX_REFCOUNT
16359+ jno 0f
16360+ addl %eax, (v)
16361+ adcl %edx, 4(v)
16362+ int $4
16363+0:
16364+ _ASM_EXTABLE(0b, 0b)
16365+#endif
16366+
16367+RET_ENDP
16368+BEGIN(sub_unchecked)
16369+ subl %eax, (v)
16370+ sbbl %edx, 4(v)
16371 RET_ENDP
16372 #undef v
16373
16374@@ -96,6 +150,27 @@ BEGIN(sub_return)
16375 sbbl $0, %edx
16376 addl (v), %eax
16377 adcl 4(v), %edx
16378+
16379+#ifdef CONFIG_PAX_REFCOUNT
16380+ into
16381+1234:
16382+ _ASM_EXTABLE(1234b, 2f)
16383+#endif
16384+
16385+ movl %eax, (v)
16386+ movl %edx, 4(v)
16387+
16388+#ifdef CONFIG_PAX_REFCOUNT
16389+2:
16390+#endif
16391+
16392+RET_ENDP
16393+BEGIN(sub_return_unchecked)
16394+ negl %edx
16395+ negl %eax
16396+ sbbl $0, %edx
16397+ addl (v), %eax
16398+ adcl 4(v), %edx
16399 movl %eax, (v)
16400 movl %edx, 4(v)
16401 RET_ENDP
16402@@ -105,6 +180,20 @@ RET_ENDP
16403 BEGIN(inc)
16404 addl $1, (v)
16405 adcl $0, 4(v)
16406+
16407+#ifdef CONFIG_PAX_REFCOUNT
16408+ jno 0f
16409+ subl $1, (v)
16410+ sbbl $0, 4(v)
16411+ int $4
16412+0:
16413+ _ASM_EXTABLE(0b, 0b)
16414+#endif
16415+
16416+RET_ENDP
16417+BEGIN(inc_unchecked)
16418+ addl $1, (v)
16419+ adcl $0, 4(v)
16420 RET_ENDP
16421 #undef v
16422
16423@@ -114,6 +203,26 @@ BEGIN(inc_return)
16424 movl 4(v), %edx
16425 addl $1, %eax
16426 adcl $0, %edx
16427+
16428+#ifdef CONFIG_PAX_REFCOUNT
16429+ into
16430+1234:
16431+ _ASM_EXTABLE(1234b, 2f)
16432+#endif
16433+
16434+ movl %eax, (v)
16435+ movl %edx, 4(v)
16436+
16437+#ifdef CONFIG_PAX_REFCOUNT
16438+2:
16439+#endif
16440+
16441+RET_ENDP
16442+BEGIN(inc_return_unchecked)
16443+ movl (v), %eax
16444+ movl 4(v), %edx
16445+ addl $1, %eax
16446+ adcl $0, %edx
16447 movl %eax, (v)
16448 movl %edx, 4(v)
16449 RET_ENDP
16450@@ -123,6 +232,20 @@ RET_ENDP
16451 BEGIN(dec)
16452 subl $1, (v)
16453 sbbl $0, 4(v)
16454+
16455+#ifdef CONFIG_PAX_REFCOUNT
16456+ jno 0f
16457+ addl $1, (v)
16458+ adcl $0, 4(v)
16459+ int $4
16460+0:
16461+ _ASM_EXTABLE(0b, 0b)
16462+#endif
16463+
16464+RET_ENDP
16465+BEGIN(dec_unchecked)
16466+ subl $1, (v)
16467+ sbbl $0, 4(v)
16468 RET_ENDP
16469 #undef v
16470
16471@@ -132,6 +255,26 @@ BEGIN(dec_return)
16472 movl 4(v), %edx
16473 subl $1, %eax
16474 sbbl $0, %edx
16475+
16476+#ifdef CONFIG_PAX_REFCOUNT
16477+ into
16478+1234:
16479+ _ASM_EXTABLE(1234b, 2f)
16480+#endif
16481+
16482+ movl %eax, (v)
16483+ movl %edx, 4(v)
16484+
16485+#ifdef CONFIG_PAX_REFCOUNT
16486+2:
16487+#endif
16488+
16489+RET_ENDP
16490+BEGIN(dec_return_unchecked)
16491+ movl (v), %eax
16492+ movl 4(v), %edx
16493+ subl $1, %eax
16494+ sbbl $0, %edx
16495 movl %eax, (v)
16496 movl %edx, 4(v)
16497 RET_ENDP
16498@@ -143,6 +286,13 @@ BEGIN(add_unless)
16499 adcl %edx, %edi
16500 addl (v), %eax
16501 adcl 4(v), %edx
16502+
16503+#ifdef CONFIG_PAX_REFCOUNT
16504+ into
16505+1234:
16506+ _ASM_EXTABLE(1234b, 2f)
16507+#endif
16508+
16509 cmpl %eax, %esi
16510 je 3f
16511 1:
16512@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
16513 1:
16514 addl $1, %eax
16515 adcl $0, %edx
16516+
16517+#ifdef CONFIG_PAX_REFCOUNT
16518+ into
16519+1234:
16520+ _ASM_EXTABLE(1234b, 2f)
16521+#endif
16522+
16523 movl %eax, (v)
16524 movl %edx, 4(v)
16525 movl $1, %eax
16526@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
16527 movl 4(v), %edx
16528 subl $1, %eax
16529 sbbl $0, %edx
16530+
16531+#ifdef CONFIG_PAX_REFCOUNT
16532+ into
16533+1234:
16534+ _ASM_EXTABLE(1234b, 1f)
16535+#endif
16536+
16537 js 1f
16538 movl %eax, (v)
16539 movl %edx, 4(v)
16540diff -urNp linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S
16541--- linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S 2011-07-21 22:17:23.000000000 -0400
16542+++ linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S 2011-08-23 21:47:55.000000000 -0400
16543@@ -39,6 +39,14 @@ ENTRY(atomic64_read_cx8)
16544 CFI_ENDPROC
16545 ENDPROC(atomic64_read_cx8)
16546
16547+ENTRY(atomic64_read_unchecked_cx8)
16548+ CFI_STARTPROC
16549+
16550+ read64 %ecx
16551+ ret
16552+ CFI_ENDPROC
16553+ENDPROC(atomic64_read_unchecked_cx8)
16554+
16555 ENTRY(atomic64_set_cx8)
16556 CFI_STARTPROC
16557
16558@@ -52,6 +60,19 @@ ENTRY(atomic64_set_cx8)
16559 CFI_ENDPROC
16560 ENDPROC(atomic64_set_cx8)
16561
16562+ENTRY(atomic64_set_unchecked_cx8)
16563+ CFI_STARTPROC
16564+
16565+1:
16566+/* we don't need LOCK_PREFIX since aligned 64-bit writes
16567+ * are atomic on 586 and newer */
16568+ cmpxchg8b (%esi)
16569+ jne 1b
16570+
16571+ ret
16572+ CFI_ENDPROC
16573+ENDPROC(atomic64_set_unchecked_cx8)
16574+
16575 ENTRY(atomic64_xchg_cx8)
16576 CFI_STARTPROC
16577
16578@@ -66,8 +87,8 @@ ENTRY(atomic64_xchg_cx8)
16579 CFI_ENDPROC
16580 ENDPROC(atomic64_xchg_cx8)
16581
16582-.macro addsub_return func ins insc
16583-ENTRY(atomic64_\func\()_return_cx8)
16584+.macro addsub_return func ins insc unchecked=""
16585+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16586 CFI_STARTPROC
16587 SAVE ebp
16588 SAVE ebx
16589@@ -84,27 +105,43 @@ ENTRY(atomic64_\func\()_return_cx8)
16590 movl %edx, %ecx
16591 \ins\()l %esi, %ebx
16592 \insc\()l %edi, %ecx
16593+
16594+.ifb \unchecked
16595+#ifdef CONFIG_PAX_REFCOUNT
16596+ into
16597+2:
16598+ _ASM_EXTABLE(2b, 3f)
16599+#endif
16600+.endif
16601+
16602 LOCK_PREFIX
16603 cmpxchg8b (%ebp)
16604 jne 1b
16605-
16606-10:
16607 movl %ebx, %eax
16608 movl %ecx, %edx
16609+
16610+.ifb \unchecked
16611+#ifdef CONFIG_PAX_REFCOUNT
16612+3:
16613+#endif
16614+.endif
16615+
16616 RESTORE edi
16617 RESTORE esi
16618 RESTORE ebx
16619 RESTORE ebp
16620 ret
16621 CFI_ENDPROC
16622-ENDPROC(atomic64_\func\()_return_cx8)
16623+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16624 .endm
16625
16626 addsub_return add add adc
16627 addsub_return sub sub sbb
16628+addsub_return add add adc _unchecked
16629+addsub_return sub sub sbb _unchecked
16630
16631-.macro incdec_return func ins insc
16632-ENTRY(atomic64_\func\()_return_cx8)
16633+.macro incdec_return func ins insc unchecked
16634+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16635 CFI_STARTPROC
16636 SAVE ebx
16637
16638@@ -114,21 +151,38 @@ ENTRY(atomic64_\func\()_return_cx8)
16639 movl %edx, %ecx
16640 \ins\()l $1, %ebx
16641 \insc\()l $0, %ecx
16642+
16643+.ifb \unchecked
16644+#ifdef CONFIG_PAX_REFCOUNT
16645+ into
16646+2:
16647+ _ASM_EXTABLE(2b, 3f)
16648+#endif
16649+.endif
16650+
16651 LOCK_PREFIX
16652 cmpxchg8b (%esi)
16653 jne 1b
16654
16655-10:
16656 movl %ebx, %eax
16657 movl %ecx, %edx
16658+
16659+.ifb \unchecked
16660+#ifdef CONFIG_PAX_REFCOUNT
16661+3:
16662+#endif
16663+.endif
16664+
16665 RESTORE ebx
16666 ret
16667 CFI_ENDPROC
16668-ENDPROC(atomic64_\func\()_return_cx8)
16669+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16670 .endm
16671
16672 incdec_return inc add adc
16673 incdec_return dec sub sbb
16674+incdec_return inc add adc _unchecked
16675+incdec_return dec sub sbb _unchecked
16676
16677 ENTRY(atomic64_dec_if_positive_cx8)
16678 CFI_STARTPROC
16679@@ -140,6 +194,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
16680 movl %edx, %ecx
16681 subl $1, %ebx
16682 sbb $0, %ecx
16683+
16684+#ifdef CONFIG_PAX_REFCOUNT
16685+ into
16686+1234:
16687+ _ASM_EXTABLE(1234b, 2f)
16688+#endif
16689+
16690 js 2f
16691 LOCK_PREFIX
16692 cmpxchg8b (%esi)
16693@@ -174,6 +235,13 @@ ENTRY(atomic64_add_unless_cx8)
16694 movl %edx, %ecx
16695 addl %esi, %ebx
16696 adcl %edi, %ecx
16697+
16698+#ifdef CONFIG_PAX_REFCOUNT
16699+ into
16700+1234:
16701+ _ASM_EXTABLE(1234b, 3f)
16702+#endif
16703+
16704 LOCK_PREFIX
16705 cmpxchg8b (%ebp)
16706 jne 1b
16707@@ -206,6 +274,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
16708 movl %edx, %ecx
16709 addl $1, %ebx
16710 adcl $0, %ecx
16711+
16712+#ifdef CONFIG_PAX_REFCOUNT
16713+ into
16714+1234:
16715+ _ASM_EXTABLE(1234b, 3f)
16716+#endif
16717+
16718 LOCK_PREFIX
16719 cmpxchg8b (%esi)
16720 jne 1b
16721diff -urNp linux-3.0.4/arch/x86/lib/checksum_32.S linux-3.0.4/arch/x86/lib/checksum_32.S
16722--- linux-3.0.4/arch/x86/lib/checksum_32.S 2011-07-21 22:17:23.000000000 -0400
16723+++ linux-3.0.4/arch/x86/lib/checksum_32.S 2011-08-23 21:47:55.000000000 -0400
16724@@ -28,7 +28,8 @@
16725 #include <linux/linkage.h>
16726 #include <asm/dwarf2.h>
16727 #include <asm/errno.h>
16728-
16729+#include <asm/segment.h>
16730+
16731 /*
16732 * computes a partial checksum, e.g. for TCP/UDP fragments
16733 */
16734@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (
16735
16736 #define ARGBASE 16
16737 #define FP 12
16738-
16739-ENTRY(csum_partial_copy_generic)
16740+
16741+ENTRY(csum_partial_copy_generic_to_user)
16742 CFI_STARTPROC
16743+
16744+#ifdef CONFIG_PAX_MEMORY_UDEREF
16745+ pushl_cfi %gs
16746+ popl_cfi %es
16747+ jmp csum_partial_copy_generic
16748+#endif
16749+
16750+ENTRY(csum_partial_copy_generic_from_user)
16751+
16752+#ifdef CONFIG_PAX_MEMORY_UDEREF
16753+ pushl_cfi %gs
16754+ popl_cfi %ds
16755+#endif
16756+
16757+ENTRY(csum_partial_copy_generic)
16758 subl $4,%esp
16759 CFI_ADJUST_CFA_OFFSET 4
16760 pushl_cfi %edi
16761@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
16762 jmp 4f
16763 SRC(1: movw (%esi), %bx )
16764 addl $2, %esi
16765-DST( movw %bx, (%edi) )
16766+DST( movw %bx, %es:(%edi) )
16767 addl $2, %edi
16768 addw %bx, %ax
16769 adcl $0, %eax
16770@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
16771 SRC(1: movl (%esi), %ebx )
16772 SRC( movl 4(%esi), %edx )
16773 adcl %ebx, %eax
16774-DST( movl %ebx, (%edi) )
16775+DST( movl %ebx, %es:(%edi) )
16776 adcl %edx, %eax
16777-DST( movl %edx, 4(%edi) )
16778+DST( movl %edx, %es:4(%edi) )
16779
16780 SRC( movl 8(%esi), %ebx )
16781 SRC( movl 12(%esi), %edx )
16782 adcl %ebx, %eax
16783-DST( movl %ebx, 8(%edi) )
16784+DST( movl %ebx, %es:8(%edi) )
16785 adcl %edx, %eax
16786-DST( movl %edx, 12(%edi) )
16787+DST( movl %edx, %es:12(%edi) )
16788
16789 SRC( movl 16(%esi), %ebx )
16790 SRC( movl 20(%esi), %edx )
16791 adcl %ebx, %eax
16792-DST( movl %ebx, 16(%edi) )
16793+DST( movl %ebx, %es:16(%edi) )
16794 adcl %edx, %eax
16795-DST( movl %edx, 20(%edi) )
16796+DST( movl %edx, %es:20(%edi) )
16797
16798 SRC( movl 24(%esi), %ebx )
16799 SRC( movl 28(%esi), %edx )
16800 adcl %ebx, %eax
16801-DST( movl %ebx, 24(%edi) )
16802+DST( movl %ebx, %es:24(%edi) )
16803 adcl %edx, %eax
16804-DST( movl %edx, 28(%edi) )
16805+DST( movl %edx, %es:28(%edi) )
16806
16807 lea 32(%esi), %esi
16808 lea 32(%edi), %edi
16809@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
16810 shrl $2, %edx # This clears CF
16811 SRC(3: movl (%esi), %ebx )
16812 adcl %ebx, %eax
16813-DST( movl %ebx, (%edi) )
16814+DST( movl %ebx, %es:(%edi) )
16815 lea 4(%esi), %esi
16816 lea 4(%edi), %edi
16817 dec %edx
16818@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
16819 jb 5f
16820 SRC( movw (%esi), %cx )
16821 leal 2(%esi), %esi
16822-DST( movw %cx, (%edi) )
16823+DST( movw %cx, %es:(%edi) )
16824 leal 2(%edi), %edi
16825 je 6f
16826 shll $16,%ecx
16827 SRC(5: movb (%esi), %cl )
16828-DST( movb %cl, (%edi) )
16829+DST( movb %cl, %es:(%edi) )
16830 6: addl %ecx, %eax
16831 adcl $0, %eax
16832 7:
16833@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
16834
16835 6001:
16836 movl ARGBASE+20(%esp), %ebx # src_err_ptr
16837- movl $-EFAULT, (%ebx)
16838+ movl $-EFAULT, %ss:(%ebx)
16839
16840 # zero the complete destination - computing the rest
16841 # is too much work
16842@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
16843
16844 6002:
16845 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
16846- movl $-EFAULT,(%ebx)
16847+ movl $-EFAULT,%ss:(%ebx)
16848 jmp 5000b
16849
16850 .previous
16851
16852+ pushl_cfi %ss
16853+ popl_cfi %ds
16854+ pushl_cfi %ss
16855+ popl_cfi %es
16856 popl_cfi %ebx
16857 CFI_RESTORE ebx
16858 popl_cfi %esi
16859@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
16860 popl_cfi %ecx # equivalent to addl $4,%esp
16861 ret
16862 CFI_ENDPROC
16863-ENDPROC(csum_partial_copy_generic)
16864+ENDPROC(csum_partial_copy_generic_to_user)
16865
16866 #else
16867
16868 /* Version for PentiumII/PPro */
16869
16870 #define ROUND1(x) \
16871+ nop; nop; nop; \
16872 SRC(movl x(%esi), %ebx ) ; \
16873 addl %ebx, %eax ; \
16874- DST(movl %ebx, x(%edi) ) ;
16875+ DST(movl %ebx, %es:x(%edi)) ;
16876
16877 #define ROUND(x) \
16878+ nop; nop; nop; \
16879 SRC(movl x(%esi), %ebx ) ; \
16880 adcl %ebx, %eax ; \
16881- DST(movl %ebx, x(%edi) ) ;
16882+ DST(movl %ebx, %es:x(%edi)) ;
16883
16884 #define ARGBASE 12
16885-
16886-ENTRY(csum_partial_copy_generic)
16887+
16888+ENTRY(csum_partial_copy_generic_to_user)
16889 CFI_STARTPROC
16890+
16891+#ifdef CONFIG_PAX_MEMORY_UDEREF
16892+ pushl_cfi %gs
16893+ popl_cfi %es
16894+ jmp csum_partial_copy_generic
16895+#endif
16896+
16897+ENTRY(csum_partial_copy_generic_from_user)
16898+
16899+#ifdef CONFIG_PAX_MEMORY_UDEREF
16900+ pushl_cfi %gs
16901+ popl_cfi %ds
16902+#endif
16903+
16904+ENTRY(csum_partial_copy_generic)
16905 pushl_cfi %ebx
16906 CFI_REL_OFFSET ebx, 0
16907 pushl_cfi %edi
16908@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
16909 subl %ebx, %edi
16910 lea -1(%esi),%edx
16911 andl $-32,%edx
16912- lea 3f(%ebx,%ebx), %ebx
16913+ lea 3f(%ebx,%ebx,2), %ebx
16914 testl %esi, %esi
16915 jmp *%ebx
16916 1: addl $64,%esi
16917@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
16918 jb 5f
16919 SRC( movw (%esi), %dx )
16920 leal 2(%esi), %esi
16921-DST( movw %dx, (%edi) )
16922+DST( movw %dx, %es:(%edi) )
16923 leal 2(%edi), %edi
16924 je 6f
16925 shll $16,%edx
16926 5:
16927 SRC( movb (%esi), %dl )
16928-DST( movb %dl, (%edi) )
16929+DST( movb %dl, %es:(%edi) )
16930 6: addl %edx, %eax
16931 adcl $0, %eax
16932 7:
16933 .section .fixup, "ax"
16934 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
16935- movl $-EFAULT, (%ebx)
16936+ movl $-EFAULT, %ss:(%ebx)
16937 # zero the complete destination (computing the rest is too much work)
16938 movl ARGBASE+8(%esp),%edi # dst
16939 movl ARGBASE+12(%esp),%ecx # len
16940@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
16941 rep; stosb
16942 jmp 7b
16943 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
16944- movl $-EFAULT, (%ebx)
16945+ movl $-EFAULT, %ss:(%ebx)
16946 jmp 7b
16947 .previous
16948
16949+#ifdef CONFIG_PAX_MEMORY_UDEREF
16950+ pushl_cfi %ss
16951+ popl_cfi %ds
16952+ pushl_cfi %ss
16953+ popl_cfi %es
16954+#endif
16955+
16956 popl_cfi %esi
16957 CFI_RESTORE esi
16958 popl_cfi %edi
16959@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
16960 CFI_RESTORE ebx
16961 ret
16962 CFI_ENDPROC
16963-ENDPROC(csum_partial_copy_generic)
16964+ENDPROC(csum_partial_copy_generic_to_user)
16965
16966 #undef ROUND
16967 #undef ROUND1
16968diff -urNp linux-3.0.4/arch/x86/lib/clear_page_64.S linux-3.0.4/arch/x86/lib/clear_page_64.S
16969--- linux-3.0.4/arch/x86/lib/clear_page_64.S 2011-07-21 22:17:23.000000000 -0400
16970+++ linux-3.0.4/arch/x86/lib/clear_page_64.S 2011-08-23 21:47:55.000000000 -0400
16971@@ -58,7 +58,7 @@ ENDPROC(clear_page)
16972
16973 #include <asm/cpufeature.h>
16974
16975- .section .altinstr_replacement,"ax"
16976+ .section .altinstr_replacement,"a"
16977 1: .byte 0xeb /* jmp <disp8> */
16978 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
16979 2: .byte 0xeb /* jmp <disp8> */
16980diff -urNp linux-3.0.4/arch/x86/lib/copy_page_64.S linux-3.0.4/arch/x86/lib/copy_page_64.S
16981--- linux-3.0.4/arch/x86/lib/copy_page_64.S 2011-07-21 22:17:23.000000000 -0400
16982+++ linux-3.0.4/arch/x86/lib/copy_page_64.S 2011-08-23 21:47:55.000000000 -0400
16983@@ -104,7 +104,7 @@ ENDPROC(copy_page)
16984
16985 #include <asm/cpufeature.h>
16986
16987- .section .altinstr_replacement,"ax"
16988+ .section .altinstr_replacement,"a"
16989 1: .byte 0xeb /* jmp <disp8> */
16990 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
16991 2:
16992diff -urNp linux-3.0.4/arch/x86/lib/copy_user_64.S linux-3.0.4/arch/x86/lib/copy_user_64.S
16993--- linux-3.0.4/arch/x86/lib/copy_user_64.S 2011-07-21 22:17:23.000000000 -0400
16994+++ linux-3.0.4/arch/x86/lib/copy_user_64.S 2011-08-23 21:47:55.000000000 -0400
16995@@ -16,6 +16,7 @@
16996 #include <asm/thread_info.h>
16997 #include <asm/cpufeature.h>
16998 #include <asm/alternative-asm.h>
16999+#include <asm/pgtable.h>
17000
17001 /*
17002 * By placing feature2 after feature1 in altinstructions section, we logically
17003@@ -29,7 +30,7 @@
17004 .byte 0xe9 /* 32bit jump */
17005 .long \orig-1f /* by default jump to orig */
17006 1:
17007- .section .altinstr_replacement,"ax"
17008+ .section .altinstr_replacement,"a"
17009 2: .byte 0xe9 /* near jump with 32bit immediate */
17010 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
17011 3: .byte 0xe9 /* near jump with 32bit immediate */
17012@@ -71,41 +72,13 @@
17013 #endif
17014 .endm
17015
17016-/* Standard copy_to_user with segment limit checking */
17017-ENTRY(_copy_to_user)
17018- CFI_STARTPROC
17019- GET_THREAD_INFO(%rax)
17020- movq %rdi,%rcx
17021- addq %rdx,%rcx
17022- jc bad_to_user
17023- cmpq TI_addr_limit(%rax),%rcx
17024- ja bad_to_user
17025- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17026- copy_user_generic_unrolled,copy_user_generic_string, \
17027- copy_user_enhanced_fast_string
17028- CFI_ENDPROC
17029-ENDPROC(_copy_to_user)
17030-
17031-/* Standard copy_from_user with segment limit checking */
17032-ENTRY(_copy_from_user)
17033- CFI_STARTPROC
17034- GET_THREAD_INFO(%rax)
17035- movq %rsi,%rcx
17036- addq %rdx,%rcx
17037- jc bad_from_user
17038- cmpq TI_addr_limit(%rax),%rcx
17039- ja bad_from_user
17040- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17041- copy_user_generic_unrolled,copy_user_generic_string, \
17042- copy_user_enhanced_fast_string
17043- CFI_ENDPROC
17044-ENDPROC(_copy_from_user)
17045-
17046 .section .fixup,"ax"
17047 /* must zero dest */
17048 ENTRY(bad_from_user)
17049 bad_from_user:
17050 CFI_STARTPROC
17051+ testl %edx,%edx
17052+ js bad_to_user
17053 movl %edx,%ecx
17054 xorl %eax,%eax
17055 rep
17056diff -urNp linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S
17057--- linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S 2011-07-21 22:17:23.000000000 -0400
17058+++ linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S 2011-08-23 21:47:55.000000000 -0400
17059@@ -14,6 +14,7 @@
17060 #include <asm/current.h>
17061 #include <asm/asm-offsets.h>
17062 #include <asm/thread_info.h>
17063+#include <asm/pgtable.h>
17064
17065 .macro ALIGN_DESTINATION
17066 #ifdef FIX_ALIGNMENT
17067@@ -50,6 +51,15 @@
17068 */
17069 ENTRY(__copy_user_nocache)
17070 CFI_STARTPROC
17071+
17072+#ifdef CONFIG_PAX_MEMORY_UDEREF
17073+ mov $PAX_USER_SHADOW_BASE,%rcx
17074+ cmp %rcx,%rsi
17075+ jae 1f
17076+ add %rcx,%rsi
17077+1:
17078+#endif
17079+
17080 cmpl $8,%edx
17081 jb 20f /* less then 8 bytes, go to byte copy loop */
17082 ALIGN_DESTINATION
17083diff -urNp linux-3.0.4/arch/x86/lib/csum-wrappers_64.c linux-3.0.4/arch/x86/lib/csum-wrappers_64.c
17084--- linux-3.0.4/arch/x86/lib/csum-wrappers_64.c 2011-07-21 22:17:23.000000000 -0400
17085+++ linux-3.0.4/arch/x86/lib/csum-wrappers_64.c 2011-08-23 21:47:55.000000000 -0400
17086@@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
17087 len -= 2;
17088 }
17089 }
17090+
17091+#ifdef CONFIG_PAX_MEMORY_UDEREF
17092+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
17093+ src += PAX_USER_SHADOW_BASE;
17094+#endif
17095+
17096 isum = csum_partial_copy_generic((__force const void *)src,
17097 dst, len, isum, errp, NULL);
17098 if (unlikely(*errp))
17099@@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
17100 }
17101
17102 *errp = 0;
17103+
17104+#ifdef CONFIG_PAX_MEMORY_UDEREF
17105+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
17106+ dst += PAX_USER_SHADOW_BASE;
17107+#endif
17108+
17109 return csum_partial_copy_generic(src, (void __force *)dst,
17110 len, isum, NULL, errp);
17111 }
17112diff -urNp linux-3.0.4/arch/x86/lib/getuser.S linux-3.0.4/arch/x86/lib/getuser.S
17113--- linux-3.0.4/arch/x86/lib/getuser.S 2011-07-21 22:17:23.000000000 -0400
17114+++ linux-3.0.4/arch/x86/lib/getuser.S 2011-08-23 21:47:55.000000000 -0400
17115@@ -33,14 +33,35 @@
17116 #include <asm/asm-offsets.h>
17117 #include <asm/thread_info.h>
17118 #include <asm/asm.h>
17119+#include <asm/segment.h>
17120+#include <asm/pgtable.h>
17121+
17122+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17123+#define __copyuser_seg gs;
17124+#else
17125+#define __copyuser_seg
17126+#endif
17127
17128 .text
17129 ENTRY(__get_user_1)
17130 CFI_STARTPROC
17131+
17132+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17133 GET_THREAD_INFO(%_ASM_DX)
17134 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17135 jae bad_get_user
17136-1: movzb (%_ASM_AX),%edx
17137+
17138+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17139+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17140+ cmp %_ASM_DX,%_ASM_AX
17141+ jae 1234f
17142+ add %_ASM_DX,%_ASM_AX
17143+1234:
17144+#endif
17145+
17146+#endif
17147+
17148+1: __copyuser_seg movzb (%_ASM_AX),%edx
17149 xor %eax,%eax
17150 ret
17151 CFI_ENDPROC
17152@@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
17153 ENTRY(__get_user_2)
17154 CFI_STARTPROC
17155 add $1,%_ASM_AX
17156+
17157+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17158 jc bad_get_user
17159 GET_THREAD_INFO(%_ASM_DX)
17160 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17161 jae bad_get_user
17162-2: movzwl -1(%_ASM_AX),%edx
17163+
17164+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17165+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17166+ cmp %_ASM_DX,%_ASM_AX
17167+ jae 1234f
17168+ add %_ASM_DX,%_ASM_AX
17169+1234:
17170+#endif
17171+
17172+#endif
17173+
17174+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
17175 xor %eax,%eax
17176 ret
17177 CFI_ENDPROC
17178@@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
17179 ENTRY(__get_user_4)
17180 CFI_STARTPROC
17181 add $3,%_ASM_AX
17182+
17183+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17184 jc bad_get_user
17185 GET_THREAD_INFO(%_ASM_DX)
17186 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17187 jae bad_get_user
17188-3: mov -3(%_ASM_AX),%edx
17189+
17190+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17191+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17192+ cmp %_ASM_DX,%_ASM_AX
17193+ jae 1234f
17194+ add %_ASM_DX,%_ASM_AX
17195+1234:
17196+#endif
17197+
17198+#endif
17199+
17200+3: __copyuser_seg mov -3(%_ASM_AX),%edx
17201 xor %eax,%eax
17202 ret
17203 CFI_ENDPROC
17204@@ -80,6 +127,15 @@ ENTRY(__get_user_8)
17205 GET_THREAD_INFO(%_ASM_DX)
17206 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17207 jae bad_get_user
17208+
17209+#ifdef CONFIG_PAX_MEMORY_UDEREF
17210+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17211+ cmp %_ASM_DX,%_ASM_AX
17212+ jae 1234f
17213+ add %_ASM_DX,%_ASM_AX
17214+1234:
17215+#endif
17216+
17217 4: movq -7(%_ASM_AX),%_ASM_DX
17218 xor %eax,%eax
17219 ret
17220diff -urNp linux-3.0.4/arch/x86/lib/insn.c linux-3.0.4/arch/x86/lib/insn.c
17221--- linux-3.0.4/arch/x86/lib/insn.c 2011-07-21 22:17:23.000000000 -0400
17222+++ linux-3.0.4/arch/x86/lib/insn.c 2011-08-23 21:47:55.000000000 -0400
17223@@ -21,6 +21,11 @@
17224 #include <linux/string.h>
17225 #include <asm/inat.h>
17226 #include <asm/insn.h>
17227+#ifdef __KERNEL__
17228+#include <asm/pgtable_types.h>
17229+#else
17230+#define ktla_ktva(addr) addr
17231+#endif
17232
17233 #define get_next(t, insn) \
17234 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
17235@@ -40,8 +45,8 @@
17236 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
17237 {
17238 memset(insn, 0, sizeof(*insn));
17239- insn->kaddr = kaddr;
17240- insn->next_byte = kaddr;
17241+ insn->kaddr = ktla_ktva(kaddr);
17242+ insn->next_byte = ktla_ktva(kaddr);
17243 insn->x86_64 = x86_64 ? 1 : 0;
17244 insn->opnd_bytes = 4;
17245 if (x86_64)
17246diff -urNp linux-3.0.4/arch/x86/lib/mmx_32.c linux-3.0.4/arch/x86/lib/mmx_32.c
17247--- linux-3.0.4/arch/x86/lib/mmx_32.c 2011-07-21 22:17:23.000000000 -0400
17248+++ linux-3.0.4/arch/x86/lib/mmx_32.c 2011-08-23 21:47:55.000000000 -0400
17249@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
17250 {
17251 void *p;
17252 int i;
17253+ unsigned long cr0;
17254
17255 if (unlikely(in_interrupt()))
17256 return __memcpy(to, from, len);
17257@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
17258 kernel_fpu_begin();
17259
17260 __asm__ __volatile__ (
17261- "1: prefetch (%0)\n" /* This set is 28 bytes */
17262- " prefetch 64(%0)\n"
17263- " prefetch 128(%0)\n"
17264- " prefetch 192(%0)\n"
17265- " prefetch 256(%0)\n"
17266+ "1: prefetch (%1)\n" /* This set is 28 bytes */
17267+ " prefetch 64(%1)\n"
17268+ " prefetch 128(%1)\n"
17269+ " prefetch 192(%1)\n"
17270+ " prefetch 256(%1)\n"
17271 "2: \n"
17272 ".section .fixup, \"ax\"\n"
17273- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17274+ "3: \n"
17275+
17276+#ifdef CONFIG_PAX_KERNEXEC
17277+ " movl %%cr0, %0\n"
17278+ " movl %0, %%eax\n"
17279+ " andl $0xFFFEFFFF, %%eax\n"
17280+ " movl %%eax, %%cr0\n"
17281+#endif
17282+
17283+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17284+
17285+#ifdef CONFIG_PAX_KERNEXEC
17286+ " movl %0, %%cr0\n"
17287+#endif
17288+
17289 " jmp 2b\n"
17290 ".previous\n"
17291 _ASM_EXTABLE(1b, 3b)
17292- : : "r" (from));
17293+ : "=&r" (cr0) : "r" (from) : "ax");
17294
17295 for ( ; i > 5; i--) {
17296 __asm__ __volatile__ (
17297- "1: prefetch 320(%0)\n"
17298- "2: movq (%0), %%mm0\n"
17299- " movq 8(%0), %%mm1\n"
17300- " movq 16(%0), %%mm2\n"
17301- " movq 24(%0), %%mm3\n"
17302- " movq %%mm0, (%1)\n"
17303- " movq %%mm1, 8(%1)\n"
17304- " movq %%mm2, 16(%1)\n"
17305- " movq %%mm3, 24(%1)\n"
17306- " movq 32(%0), %%mm0\n"
17307- " movq 40(%0), %%mm1\n"
17308- " movq 48(%0), %%mm2\n"
17309- " movq 56(%0), %%mm3\n"
17310- " movq %%mm0, 32(%1)\n"
17311- " movq %%mm1, 40(%1)\n"
17312- " movq %%mm2, 48(%1)\n"
17313- " movq %%mm3, 56(%1)\n"
17314+ "1: prefetch 320(%1)\n"
17315+ "2: movq (%1), %%mm0\n"
17316+ " movq 8(%1), %%mm1\n"
17317+ " movq 16(%1), %%mm2\n"
17318+ " movq 24(%1), %%mm3\n"
17319+ " movq %%mm0, (%2)\n"
17320+ " movq %%mm1, 8(%2)\n"
17321+ " movq %%mm2, 16(%2)\n"
17322+ " movq %%mm3, 24(%2)\n"
17323+ " movq 32(%1), %%mm0\n"
17324+ " movq 40(%1), %%mm1\n"
17325+ " movq 48(%1), %%mm2\n"
17326+ " movq 56(%1), %%mm3\n"
17327+ " movq %%mm0, 32(%2)\n"
17328+ " movq %%mm1, 40(%2)\n"
17329+ " movq %%mm2, 48(%2)\n"
17330+ " movq %%mm3, 56(%2)\n"
17331 ".section .fixup, \"ax\"\n"
17332- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17333+ "3:\n"
17334+
17335+#ifdef CONFIG_PAX_KERNEXEC
17336+ " movl %%cr0, %0\n"
17337+ " movl %0, %%eax\n"
17338+ " andl $0xFFFEFFFF, %%eax\n"
17339+ " movl %%eax, %%cr0\n"
17340+#endif
17341+
17342+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17343+
17344+#ifdef CONFIG_PAX_KERNEXEC
17345+ " movl %0, %%cr0\n"
17346+#endif
17347+
17348 " jmp 2b\n"
17349 ".previous\n"
17350 _ASM_EXTABLE(1b, 3b)
17351- : : "r" (from), "r" (to) : "memory");
17352+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17353
17354 from += 64;
17355 to += 64;
17356@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
17357 static void fast_copy_page(void *to, void *from)
17358 {
17359 int i;
17360+ unsigned long cr0;
17361
17362 kernel_fpu_begin();
17363
17364@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
17365 * but that is for later. -AV
17366 */
17367 __asm__ __volatile__(
17368- "1: prefetch (%0)\n"
17369- " prefetch 64(%0)\n"
17370- " prefetch 128(%0)\n"
17371- " prefetch 192(%0)\n"
17372- " prefetch 256(%0)\n"
17373+ "1: prefetch (%1)\n"
17374+ " prefetch 64(%1)\n"
17375+ " prefetch 128(%1)\n"
17376+ " prefetch 192(%1)\n"
17377+ " prefetch 256(%1)\n"
17378 "2: \n"
17379 ".section .fixup, \"ax\"\n"
17380- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17381+ "3: \n"
17382+
17383+#ifdef CONFIG_PAX_KERNEXEC
17384+ " movl %%cr0, %0\n"
17385+ " movl %0, %%eax\n"
17386+ " andl $0xFFFEFFFF, %%eax\n"
17387+ " movl %%eax, %%cr0\n"
17388+#endif
17389+
17390+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17391+
17392+#ifdef CONFIG_PAX_KERNEXEC
17393+ " movl %0, %%cr0\n"
17394+#endif
17395+
17396 " jmp 2b\n"
17397 ".previous\n"
17398- _ASM_EXTABLE(1b, 3b) : : "r" (from));
17399+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17400
17401 for (i = 0; i < (4096-320)/64; i++) {
17402 __asm__ __volatile__ (
17403- "1: prefetch 320(%0)\n"
17404- "2: movq (%0), %%mm0\n"
17405- " movntq %%mm0, (%1)\n"
17406- " movq 8(%0), %%mm1\n"
17407- " movntq %%mm1, 8(%1)\n"
17408- " movq 16(%0), %%mm2\n"
17409- " movntq %%mm2, 16(%1)\n"
17410- " movq 24(%0), %%mm3\n"
17411- " movntq %%mm3, 24(%1)\n"
17412- " movq 32(%0), %%mm4\n"
17413- " movntq %%mm4, 32(%1)\n"
17414- " movq 40(%0), %%mm5\n"
17415- " movntq %%mm5, 40(%1)\n"
17416- " movq 48(%0), %%mm6\n"
17417- " movntq %%mm6, 48(%1)\n"
17418- " movq 56(%0), %%mm7\n"
17419- " movntq %%mm7, 56(%1)\n"
17420+ "1: prefetch 320(%1)\n"
17421+ "2: movq (%1), %%mm0\n"
17422+ " movntq %%mm0, (%2)\n"
17423+ " movq 8(%1), %%mm1\n"
17424+ " movntq %%mm1, 8(%2)\n"
17425+ " movq 16(%1), %%mm2\n"
17426+ " movntq %%mm2, 16(%2)\n"
17427+ " movq 24(%1), %%mm3\n"
17428+ " movntq %%mm3, 24(%2)\n"
17429+ " movq 32(%1), %%mm4\n"
17430+ " movntq %%mm4, 32(%2)\n"
17431+ " movq 40(%1), %%mm5\n"
17432+ " movntq %%mm5, 40(%2)\n"
17433+ " movq 48(%1), %%mm6\n"
17434+ " movntq %%mm6, 48(%2)\n"
17435+ " movq 56(%1), %%mm7\n"
17436+ " movntq %%mm7, 56(%2)\n"
17437 ".section .fixup, \"ax\"\n"
17438- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17439+ "3:\n"
17440+
17441+#ifdef CONFIG_PAX_KERNEXEC
17442+ " movl %%cr0, %0\n"
17443+ " movl %0, %%eax\n"
17444+ " andl $0xFFFEFFFF, %%eax\n"
17445+ " movl %%eax, %%cr0\n"
17446+#endif
17447+
17448+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17449+
17450+#ifdef CONFIG_PAX_KERNEXEC
17451+ " movl %0, %%cr0\n"
17452+#endif
17453+
17454 " jmp 2b\n"
17455 ".previous\n"
17456- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
17457+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17458
17459 from += 64;
17460 to += 64;
17461@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
17462 static void fast_copy_page(void *to, void *from)
17463 {
17464 int i;
17465+ unsigned long cr0;
17466
17467 kernel_fpu_begin();
17468
17469 __asm__ __volatile__ (
17470- "1: prefetch (%0)\n"
17471- " prefetch 64(%0)\n"
17472- " prefetch 128(%0)\n"
17473- " prefetch 192(%0)\n"
17474- " prefetch 256(%0)\n"
17475+ "1: prefetch (%1)\n"
17476+ " prefetch 64(%1)\n"
17477+ " prefetch 128(%1)\n"
17478+ " prefetch 192(%1)\n"
17479+ " prefetch 256(%1)\n"
17480 "2: \n"
17481 ".section .fixup, \"ax\"\n"
17482- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17483+ "3: \n"
17484+
17485+#ifdef CONFIG_PAX_KERNEXEC
17486+ " movl %%cr0, %0\n"
17487+ " movl %0, %%eax\n"
17488+ " andl $0xFFFEFFFF, %%eax\n"
17489+ " movl %%eax, %%cr0\n"
17490+#endif
17491+
17492+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17493+
17494+#ifdef CONFIG_PAX_KERNEXEC
17495+ " movl %0, %%cr0\n"
17496+#endif
17497+
17498 " jmp 2b\n"
17499 ".previous\n"
17500- _ASM_EXTABLE(1b, 3b) : : "r" (from));
17501+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17502
17503 for (i = 0; i < 4096/64; i++) {
17504 __asm__ __volatile__ (
17505- "1: prefetch 320(%0)\n"
17506- "2: movq (%0), %%mm0\n"
17507- " movq 8(%0), %%mm1\n"
17508- " movq 16(%0), %%mm2\n"
17509- " movq 24(%0), %%mm3\n"
17510- " movq %%mm0, (%1)\n"
17511- " movq %%mm1, 8(%1)\n"
17512- " movq %%mm2, 16(%1)\n"
17513- " movq %%mm3, 24(%1)\n"
17514- " movq 32(%0), %%mm0\n"
17515- " movq 40(%0), %%mm1\n"
17516- " movq 48(%0), %%mm2\n"
17517- " movq 56(%0), %%mm3\n"
17518- " movq %%mm0, 32(%1)\n"
17519- " movq %%mm1, 40(%1)\n"
17520- " movq %%mm2, 48(%1)\n"
17521- " movq %%mm3, 56(%1)\n"
17522+ "1: prefetch 320(%1)\n"
17523+ "2: movq (%1), %%mm0\n"
17524+ " movq 8(%1), %%mm1\n"
17525+ " movq 16(%1), %%mm2\n"
17526+ " movq 24(%1), %%mm3\n"
17527+ " movq %%mm0, (%2)\n"
17528+ " movq %%mm1, 8(%2)\n"
17529+ " movq %%mm2, 16(%2)\n"
17530+ " movq %%mm3, 24(%2)\n"
17531+ " movq 32(%1), %%mm0\n"
17532+ " movq 40(%1), %%mm1\n"
17533+ " movq 48(%1), %%mm2\n"
17534+ " movq 56(%1), %%mm3\n"
17535+ " movq %%mm0, 32(%2)\n"
17536+ " movq %%mm1, 40(%2)\n"
17537+ " movq %%mm2, 48(%2)\n"
17538+ " movq %%mm3, 56(%2)\n"
17539 ".section .fixup, \"ax\"\n"
17540- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17541+ "3:\n"
17542+
17543+#ifdef CONFIG_PAX_KERNEXEC
17544+ " movl %%cr0, %0\n"
17545+ " movl %0, %%eax\n"
17546+ " andl $0xFFFEFFFF, %%eax\n"
17547+ " movl %%eax, %%cr0\n"
17548+#endif
17549+
17550+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17551+
17552+#ifdef CONFIG_PAX_KERNEXEC
17553+ " movl %0, %%cr0\n"
17554+#endif
17555+
17556 " jmp 2b\n"
17557 ".previous\n"
17558 _ASM_EXTABLE(1b, 3b)
17559- : : "r" (from), "r" (to) : "memory");
17560+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17561
17562 from += 64;
17563 to += 64;
17564diff -urNp linux-3.0.4/arch/x86/lib/putuser.S linux-3.0.4/arch/x86/lib/putuser.S
17565--- linux-3.0.4/arch/x86/lib/putuser.S 2011-07-21 22:17:23.000000000 -0400
17566+++ linux-3.0.4/arch/x86/lib/putuser.S 2011-08-23 21:47:55.000000000 -0400
17567@@ -15,7 +15,8 @@
17568 #include <asm/thread_info.h>
17569 #include <asm/errno.h>
17570 #include <asm/asm.h>
17571-
17572+#include <asm/segment.h>
17573+#include <asm/pgtable.h>
17574
17575 /*
17576 * __put_user_X
17577@@ -29,52 +30,119 @@
17578 * as they get called from within inline assembly.
17579 */
17580
17581-#define ENTER CFI_STARTPROC ; \
17582- GET_THREAD_INFO(%_ASM_BX)
17583+#define ENTER CFI_STARTPROC
17584 #define EXIT ret ; \
17585 CFI_ENDPROC
17586
17587+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17588+#define _DEST %_ASM_CX,%_ASM_BX
17589+#else
17590+#define _DEST %_ASM_CX
17591+#endif
17592+
17593+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17594+#define __copyuser_seg gs;
17595+#else
17596+#define __copyuser_seg
17597+#endif
17598+
17599 .text
17600 ENTRY(__put_user_1)
17601 ENTER
17602+
17603+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17604+ GET_THREAD_INFO(%_ASM_BX)
17605 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
17606 jae bad_put_user
17607-1: movb %al,(%_ASM_CX)
17608+
17609+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17610+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17611+ cmp %_ASM_BX,%_ASM_CX
17612+ jb 1234f
17613+ xor %ebx,%ebx
17614+1234:
17615+#endif
17616+
17617+#endif
17618+
17619+1: __copyuser_seg movb %al,(_DEST)
17620 xor %eax,%eax
17621 EXIT
17622 ENDPROC(__put_user_1)
17623
17624 ENTRY(__put_user_2)
17625 ENTER
17626+
17627+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17628+ GET_THREAD_INFO(%_ASM_BX)
17629 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17630 sub $1,%_ASM_BX
17631 cmp %_ASM_BX,%_ASM_CX
17632 jae bad_put_user
17633-2: movw %ax,(%_ASM_CX)
17634+
17635+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17636+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17637+ cmp %_ASM_BX,%_ASM_CX
17638+ jb 1234f
17639+ xor %ebx,%ebx
17640+1234:
17641+#endif
17642+
17643+#endif
17644+
17645+2: __copyuser_seg movw %ax,(_DEST)
17646 xor %eax,%eax
17647 EXIT
17648 ENDPROC(__put_user_2)
17649
17650 ENTRY(__put_user_4)
17651 ENTER
17652+
17653+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17654+ GET_THREAD_INFO(%_ASM_BX)
17655 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17656 sub $3,%_ASM_BX
17657 cmp %_ASM_BX,%_ASM_CX
17658 jae bad_put_user
17659-3: movl %eax,(%_ASM_CX)
17660+
17661+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17662+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17663+ cmp %_ASM_BX,%_ASM_CX
17664+ jb 1234f
17665+ xor %ebx,%ebx
17666+1234:
17667+#endif
17668+
17669+#endif
17670+
17671+3: __copyuser_seg movl %eax,(_DEST)
17672 xor %eax,%eax
17673 EXIT
17674 ENDPROC(__put_user_4)
17675
17676 ENTRY(__put_user_8)
17677 ENTER
17678+
17679+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17680+ GET_THREAD_INFO(%_ASM_BX)
17681 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17682 sub $7,%_ASM_BX
17683 cmp %_ASM_BX,%_ASM_CX
17684 jae bad_put_user
17685-4: mov %_ASM_AX,(%_ASM_CX)
17686+
17687+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17688+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17689+ cmp %_ASM_BX,%_ASM_CX
17690+ jb 1234f
17691+ xor %ebx,%ebx
17692+1234:
17693+#endif
17694+
17695+#endif
17696+
17697+4: __copyuser_seg mov %_ASM_AX,(_DEST)
17698 #ifdef CONFIG_X86_32
17699-5: movl %edx,4(%_ASM_CX)
17700+5: __copyuser_seg movl %edx,4(_DEST)
17701 #endif
17702 xor %eax,%eax
17703 EXIT
17704diff -urNp linux-3.0.4/arch/x86/lib/usercopy_32.c linux-3.0.4/arch/x86/lib/usercopy_32.c
17705--- linux-3.0.4/arch/x86/lib/usercopy_32.c 2011-07-21 22:17:23.000000000 -0400
17706+++ linux-3.0.4/arch/x86/lib/usercopy_32.c 2011-08-23 21:47:55.000000000 -0400
17707@@ -43,7 +43,7 @@ do { \
17708 __asm__ __volatile__( \
17709 " testl %1,%1\n" \
17710 " jz 2f\n" \
17711- "0: lodsb\n" \
17712+ "0: "__copyuser_seg"lodsb\n" \
17713 " stosb\n" \
17714 " testb %%al,%%al\n" \
17715 " jz 1f\n" \
17716@@ -128,10 +128,12 @@ do { \
17717 int __d0; \
17718 might_fault(); \
17719 __asm__ __volatile__( \
17720+ __COPYUSER_SET_ES \
17721 "0: rep; stosl\n" \
17722 " movl %2,%0\n" \
17723 "1: rep; stosb\n" \
17724 "2:\n" \
17725+ __COPYUSER_RESTORE_ES \
17726 ".section .fixup,\"ax\"\n" \
17727 "3: lea 0(%2,%0,4),%0\n" \
17728 " jmp 2b\n" \
17729@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
17730 might_fault();
17731
17732 __asm__ __volatile__(
17733+ __COPYUSER_SET_ES
17734 " testl %0, %0\n"
17735 " jz 3f\n"
17736 " andl %0,%%ecx\n"
17737@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
17738 " subl %%ecx,%0\n"
17739 " addl %0,%%eax\n"
17740 "1:\n"
17741+ __COPYUSER_RESTORE_ES
17742 ".section .fixup,\"ax\"\n"
17743 "2: xorl %%eax,%%eax\n"
17744 " jmp 1b\n"
17745@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
17746
17747 #ifdef CONFIG_X86_INTEL_USERCOPY
17748 static unsigned long
17749-__copy_user_intel(void __user *to, const void *from, unsigned long size)
17750+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
17751 {
17752 int d0, d1;
17753 __asm__ __volatile__(
17754@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
17755 " .align 2,0x90\n"
17756 "3: movl 0(%4), %%eax\n"
17757 "4: movl 4(%4), %%edx\n"
17758- "5: movl %%eax, 0(%3)\n"
17759- "6: movl %%edx, 4(%3)\n"
17760+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
17761+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
17762 "7: movl 8(%4), %%eax\n"
17763 "8: movl 12(%4),%%edx\n"
17764- "9: movl %%eax, 8(%3)\n"
17765- "10: movl %%edx, 12(%3)\n"
17766+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
17767+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
17768 "11: movl 16(%4), %%eax\n"
17769 "12: movl 20(%4), %%edx\n"
17770- "13: movl %%eax, 16(%3)\n"
17771- "14: movl %%edx, 20(%3)\n"
17772+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
17773+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
17774 "15: movl 24(%4), %%eax\n"
17775 "16: movl 28(%4), %%edx\n"
17776- "17: movl %%eax, 24(%3)\n"
17777- "18: movl %%edx, 28(%3)\n"
17778+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
17779+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
17780 "19: movl 32(%4), %%eax\n"
17781 "20: movl 36(%4), %%edx\n"
17782- "21: movl %%eax, 32(%3)\n"
17783- "22: movl %%edx, 36(%3)\n"
17784+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
17785+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
17786 "23: movl 40(%4), %%eax\n"
17787 "24: movl 44(%4), %%edx\n"
17788- "25: movl %%eax, 40(%3)\n"
17789- "26: movl %%edx, 44(%3)\n"
17790+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
17791+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
17792 "27: movl 48(%4), %%eax\n"
17793 "28: movl 52(%4), %%edx\n"
17794- "29: movl %%eax, 48(%3)\n"
17795- "30: movl %%edx, 52(%3)\n"
17796+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
17797+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
17798 "31: movl 56(%4), %%eax\n"
17799 "32: movl 60(%4), %%edx\n"
17800- "33: movl %%eax, 56(%3)\n"
17801- "34: movl %%edx, 60(%3)\n"
17802+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
17803+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
17804 " addl $-64, %0\n"
17805 " addl $64, %4\n"
17806 " addl $64, %3\n"
17807@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
17808 " shrl $2, %0\n"
17809 " andl $3, %%eax\n"
17810 " cld\n"
17811+ __COPYUSER_SET_ES
17812 "99: rep; movsl\n"
17813 "36: movl %%eax, %0\n"
17814 "37: rep; movsb\n"
17815 "100:\n"
17816+ __COPYUSER_RESTORE_ES
17817+ ".section .fixup,\"ax\"\n"
17818+ "101: lea 0(%%eax,%0,4),%0\n"
17819+ " jmp 100b\n"
17820+ ".previous\n"
17821+ ".section __ex_table,\"a\"\n"
17822+ " .align 4\n"
17823+ " .long 1b,100b\n"
17824+ " .long 2b,100b\n"
17825+ " .long 3b,100b\n"
17826+ " .long 4b,100b\n"
17827+ " .long 5b,100b\n"
17828+ " .long 6b,100b\n"
17829+ " .long 7b,100b\n"
17830+ " .long 8b,100b\n"
17831+ " .long 9b,100b\n"
17832+ " .long 10b,100b\n"
17833+ " .long 11b,100b\n"
17834+ " .long 12b,100b\n"
17835+ " .long 13b,100b\n"
17836+ " .long 14b,100b\n"
17837+ " .long 15b,100b\n"
17838+ " .long 16b,100b\n"
17839+ " .long 17b,100b\n"
17840+ " .long 18b,100b\n"
17841+ " .long 19b,100b\n"
17842+ " .long 20b,100b\n"
17843+ " .long 21b,100b\n"
17844+ " .long 22b,100b\n"
17845+ " .long 23b,100b\n"
17846+ " .long 24b,100b\n"
17847+ " .long 25b,100b\n"
17848+ " .long 26b,100b\n"
17849+ " .long 27b,100b\n"
17850+ " .long 28b,100b\n"
17851+ " .long 29b,100b\n"
17852+ " .long 30b,100b\n"
17853+ " .long 31b,100b\n"
17854+ " .long 32b,100b\n"
17855+ " .long 33b,100b\n"
17856+ " .long 34b,100b\n"
17857+ " .long 35b,100b\n"
17858+ " .long 36b,100b\n"
17859+ " .long 37b,100b\n"
17860+ " .long 99b,101b\n"
17861+ ".previous"
17862+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
17863+ : "1"(to), "2"(from), "0"(size)
17864+ : "eax", "edx", "memory");
17865+ return size;
17866+}
17867+
17868+static unsigned long
17869+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
17870+{
17871+ int d0, d1;
17872+ __asm__ __volatile__(
17873+ " .align 2,0x90\n"
17874+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
17875+ " cmpl $67, %0\n"
17876+ " jbe 3f\n"
17877+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
17878+ " .align 2,0x90\n"
17879+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
17880+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
17881+ "5: movl %%eax, 0(%3)\n"
17882+ "6: movl %%edx, 4(%3)\n"
17883+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
17884+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
17885+ "9: movl %%eax, 8(%3)\n"
17886+ "10: movl %%edx, 12(%3)\n"
17887+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
17888+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
17889+ "13: movl %%eax, 16(%3)\n"
17890+ "14: movl %%edx, 20(%3)\n"
17891+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
17892+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
17893+ "17: movl %%eax, 24(%3)\n"
17894+ "18: movl %%edx, 28(%3)\n"
17895+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
17896+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
17897+ "21: movl %%eax, 32(%3)\n"
17898+ "22: movl %%edx, 36(%3)\n"
17899+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
17900+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
17901+ "25: movl %%eax, 40(%3)\n"
17902+ "26: movl %%edx, 44(%3)\n"
17903+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
17904+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
17905+ "29: movl %%eax, 48(%3)\n"
17906+ "30: movl %%edx, 52(%3)\n"
17907+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
17908+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
17909+ "33: movl %%eax, 56(%3)\n"
17910+ "34: movl %%edx, 60(%3)\n"
17911+ " addl $-64, %0\n"
17912+ " addl $64, %4\n"
17913+ " addl $64, %3\n"
17914+ " cmpl $63, %0\n"
17915+ " ja 1b\n"
17916+ "35: movl %0, %%eax\n"
17917+ " shrl $2, %0\n"
17918+ " andl $3, %%eax\n"
17919+ " cld\n"
17920+ "99: rep; "__copyuser_seg" movsl\n"
17921+ "36: movl %%eax, %0\n"
17922+ "37: rep; "__copyuser_seg" movsb\n"
17923+ "100:\n"
17924 ".section .fixup,\"ax\"\n"
17925 "101: lea 0(%%eax,%0,4),%0\n"
17926 " jmp 100b\n"
17927@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
17928 int d0, d1;
17929 __asm__ __volatile__(
17930 " .align 2,0x90\n"
17931- "0: movl 32(%4), %%eax\n"
17932+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
17933 " cmpl $67, %0\n"
17934 " jbe 2f\n"
17935- "1: movl 64(%4), %%eax\n"
17936+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
17937 " .align 2,0x90\n"
17938- "2: movl 0(%4), %%eax\n"
17939- "21: movl 4(%4), %%edx\n"
17940+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
17941+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
17942 " movl %%eax, 0(%3)\n"
17943 " movl %%edx, 4(%3)\n"
17944- "3: movl 8(%4), %%eax\n"
17945- "31: movl 12(%4),%%edx\n"
17946+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
17947+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
17948 " movl %%eax, 8(%3)\n"
17949 " movl %%edx, 12(%3)\n"
17950- "4: movl 16(%4), %%eax\n"
17951- "41: movl 20(%4), %%edx\n"
17952+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
17953+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
17954 " movl %%eax, 16(%3)\n"
17955 " movl %%edx, 20(%3)\n"
17956- "10: movl 24(%4), %%eax\n"
17957- "51: movl 28(%4), %%edx\n"
17958+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
17959+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
17960 " movl %%eax, 24(%3)\n"
17961 " movl %%edx, 28(%3)\n"
17962- "11: movl 32(%4), %%eax\n"
17963- "61: movl 36(%4), %%edx\n"
17964+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
17965+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
17966 " movl %%eax, 32(%3)\n"
17967 " movl %%edx, 36(%3)\n"
17968- "12: movl 40(%4), %%eax\n"
17969- "71: movl 44(%4), %%edx\n"
17970+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
17971+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
17972 " movl %%eax, 40(%3)\n"
17973 " movl %%edx, 44(%3)\n"
17974- "13: movl 48(%4), %%eax\n"
17975- "81: movl 52(%4), %%edx\n"
17976+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
17977+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
17978 " movl %%eax, 48(%3)\n"
17979 " movl %%edx, 52(%3)\n"
17980- "14: movl 56(%4), %%eax\n"
17981- "91: movl 60(%4), %%edx\n"
17982+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
17983+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
17984 " movl %%eax, 56(%3)\n"
17985 " movl %%edx, 60(%3)\n"
17986 " addl $-64, %0\n"
17987@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
17988 " shrl $2, %0\n"
17989 " andl $3, %%eax\n"
17990 " cld\n"
17991- "6: rep; movsl\n"
17992+ "6: rep; "__copyuser_seg" movsl\n"
17993 " movl %%eax,%0\n"
17994- "7: rep; movsb\n"
17995+ "7: rep; "__copyuser_seg" movsb\n"
17996 "8:\n"
17997 ".section .fixup,\"ax\"\n"
17998 "9: lea 0(%%eax,%0,4),%0\n"
17999@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
18000
18001 __asm__ __volatile__(
18002 " .align 2,0x90\n"
18003- "0: movl 32(%4), %%eax\n"
18004+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18005 " cmpl $67, %0\n"
18006 " jbe 2f\n"
18007- "1: movl 64(%4), %%eax\n"
18008+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18009 " .align 2,0x90\n"
18010- "2: movl 0(%4), %%eax\n"
18011- "21: movl 4(%4), %%edx\n"
18012+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18013+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18014 " movnti %%eax, 0(%3)\n"
18015 " movnti %%edx, 4(%3)\n"
18016- "3: movl 8(%4), %%eax\n"
18017- "31: movl 12(%4),%%edx\n"
18018+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18019+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18020 " movnti %%eax, 8(%3)\n"
18021 " movnti %%edx, 12(%3)\n"
18022- "4: movl 16(%4), %%eax\n"
18023- "41: movl 20(%4), %%edx\n"
18024+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18025+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18026 " movnti %%eax, 16(%3)\n"
18027 " movnti %%edx, 20(%3)\n"
18028- "10: movl 24(%4), %%eax\n"
18029- "51: movl 28(%4), %%edx\n"
18030+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18031+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18032 " movnti %%eax, 24(%3)\n"
18033 " movnti %%edx, 28(%3)\n"
18034- "11: movl 32(%4), %%eax\n"
18035- "61: movl 36(%4), %%edx\n"
18036+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18037+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18038 " movnti %%eax, 32(%3)\n"
18039 " movnti %%edx, 36(%3)\n"
18040- "12: movl 40(%4), %%eax\n"
18041- "71: movl 44(%4), %%edx\n"
18042+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18043+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18044 " movnti %%eax, 40(%3)\n"
18045 " movnti %%edx, 44(%3)\n"
18046- "13: movl 48(%4), %%eax\n"
18047- "81: movl 52(%4), %%edx\n"
18048+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18049+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18050 " movnti %%eax, 48(%3)\n"
18051 " movnti %%edx, 52(%3)\n"
18052- "14: movl 56(%4), %%eax\n"
18053- "91: movl 60(%4), %%edx\n"
18054+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18055+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18056 " movnti %%eax, 56(%3)\n"
18057 " movnti %%edx, 60(%3)\n"
18058 " addl $-64, %0\n"
18059@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
18060 " shrl $2, %0\n"
18061 " andl $3, %%eax\n"
18062 " cld\n"
18063- "6: rep; movsl\n"
18064+ "6: rep; "__copyuser_seg" movsl\n"
18065 " movl %%eax,%0\n"
18066- "7: rep; movsb\n"
18067+ "7: rep; "__copyuser_seg" movsb\n"
18068 "8:\n"
18069 ".section .fixup,\"ax\"\n"
18070 "9: lea 0(%%eax,%0,4),%0\n"
18071@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
18072
18073 __asm__ __volatile__(
18074 " .align 2,0x90\n"
18075- "0: movl 32(%4), %%eax\n"
18076+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18077 " cmpl $67, %0\n"
18078 " jbe 2f\n"
18079- "1: movl 64(%4), %%eax\n"
18080+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18081 " .align 2,0x90\n"
18082- "2: movl 0(%4), %%eax\n"
18083- "21: movl 4(%4), %%edx\n"
18084+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18085+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18086 " movnti %%eax, 0(%3)\n"
18087 " movnti %%edx, 4(%3)\n"
18088- "3: movl 8(%4), %%eax\n"
18089- "31: movl 12(%4),%%edx\n"
18090+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18091+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18092 " movnti %%eax, 8(%3)\n"
18093 " movnti %%edx, 12(%3)\n"
18094- "4: movl 16(%4), %%eax\n"
18095- "41: movl 20(%4), %%edx\n"
18096+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18097+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18098 " movnti %%eax, 16(%3)\n"
18099 " movnti %%edx, 20(%3)\n"
18100- "10: movl 24(%4), %%eax\n"
18101- "51: movl 28(%4), %%edx\n"
18102+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18103+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18104 " movnti %%eax, 24(%3)\n"
18105 " movnti %%edx, 28(%3)\n"
18106- "11: movl 32(%4), %%eax\n"
18107- "61: movl 36(%4), %%edx\n"
18108+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18109+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18110 " movnti %%eax, 32(%3)\n"
18111 " movnti %%edx, 36(%3)\n"
18112- "12: movl 40(%4), %%eax\n"
18113- "71: movl 44(%4), %%edx\n"
18114+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18115+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18116 " movnti %%eax, 40(%3)\n"
18117 " movnti %%edx, 44(%3)\n"
18118- "13: movl 48(%4), %%eax\n"
18119- "81: movl 52(%4), %%edx\n"
18120+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18121+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18122 " movnti %%eax, 48(%3)\n"
18123 " movnti %%edx, 52(%3)\n"
18124- "14: movl 56(%4), %%eax\n"
18125- "91: movl 60(%4), %%edx\n"
18126+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18127+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18128 " movnti %%eax, 56(%3)\n"
18129 " movnti %%edx, 60(%3)\n"
18130 " addl $-64, %0\n"
18131@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
18132 " shrl $2, %0\n"
18133 " andl $3, %%eax\n"
18134 " cld\n"
18135- "6: rep; movsl\n"
18136+ "6: rep; "__copyuser_seg" movsl\n"
18137 " movl %%eax,%0\n"
18138- "7: rep; movsb\n"
18139+ "7: rep; "__copyuser_seg" movsb\n"
18140 "8:\n"
18141 ".section .fixup,\"ax\"\n"
18142 "9: lea 0(%%eax,%0,4),%0\n"
18143@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
18144 */
18145 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
18146 unsigned long size);
18147-unsigned long __copy_user_intel(void __user *to, const void *from,
18148+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
18149+ unsigned long size);
18150+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
18151 unsigned long size);
18152 unsigned long __copy_user_zeroing_intel_nocache(void *to,
18153 const void __user *from, unsigned long size);
18154 #endif /* CONFIG_X86_INTEL_USERCOPY */
18155
18156 /* Generic arbitrary sized copy. */
18157-#define __copy_user(to, from, size) \
18158+#define __copy_user(to, from, size, prefix, set, restore) \
18159 do { \
18160 int __d0, __d1, __d2; \
18161 __asm__ __volatile__( \
18162+ set \
18163 " cmp $7,%0\n" \
18164 " jbe 1f\n" \
18165 " movl %1,%0\n" \
18166 " negl %0\n" \
18167 " andl $7,%0\n" \
18168 " subl %0,%3\n" \
18169- "4: rep; movsb\n" \
18170+ "4: rep; "prefix"movsb\n" \
18171 " movl %3,%0\n" \
18172 " shrl $2,%0\n" \
18173 " andl $3,%3\n" \
18174 " .align 2,0x90\n" \
18175- "0: rep; movsl\n" \
18176+ "0: rep; "prefix"movsl\n" \
18177 " movl %3,%0\n" \
18178- "1: rep; movsb\n" \
18179+ "1: rep; "prefix"movsb\n" \
18180 "2:\n" \
18181+ restore \
18182 ".section .fixup,\"ax\"\n" \
18183 "5: addl %3,%0\n" \
18184 " jmp 2b\n" \
18185@@ -682,14 +799,14 @@ do { \
18186 " negl %0\n" \
18187 " andl $7,%0\n" \
18188 " subl %0,%3\n" \
18189- "4: rep; movsb\n" \
18190+ "4: rep; "__copyuser_seg"movsb\n" \
18191 " movl %3,%0\n" \
18192 " shrl $2,%0\n" \
18193 " andl $3,%3\n" \
18194 " .align 2,0x90\n" \
18195- "0: rep; movsl\n" \
18196+ "0: rep; "__copyuser_seg"movsl\n" \
18197 " movl %3,%0\n" \
18198- "1: rep; movsb\n" \
18199+ "1: rep; "__copyuser_seg"movsb\n" \
18200 "2:\n" \
18201 ".section .fixup,\"ax\"\n" \
18202 "5: addl %3,%0\n" \
18203@@ -775,9 +892,9 @@ survive:
18204 }
18205 #endif
18206 if (movsl_is_ok(to, from, n))
18207- __copy_user(to, from, n);
18208+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
18209 else
18210- n = __copy_user_intel(to, from, n);
18211+ n = __generic_copy_to_user_intel(to, from, n);
18212 return n;
18213 }
18214 EXPORT_SYMBOL(__copy_to_user_ll);
18215@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
18216 unsigned long n)
18217 {
18218 if (movsl_is_ok(to, from, n))
18219- __copy_user(to, from, n);
18220+ __copy_user(to, from, n, __copyuser_seg, "", "");
18221 else
18222- n = __copy_user_intel((void __user *)to,
18223- (const void *)from, n);
18224+ n = __generic_copy_from_user_intel(to, from, n);
18225 return n;
18226 }
18227 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
18228@@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocach
18229 if (n > 64 && cpu_has_xmm2)
18230 n = __copy_user_intel_nocache(to, from, n);
18231 else
18232- __copy_user(to, from, n);
18233+ __copy_user(to, from, n, __copyuser_seg, "", "");
18234 #else
18235- __copy_user(to, from, n);
18236+ __copy_user(to, from, n, __copyuser_seg, "", "");
18237 #endif
18238 return n;
18239 }
18240 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
18241
18242-/**
18243- * copy_to_user: - Copy a block of data into user space.
18244- * @to: Destination address, in user space.
18245- * @from: Source address, in kernel space.
18246- * @n: Number of bytes to copy.
18247- *
18248- * Context: User context only. This function may sleep.
18249- *
18250- * Copy data from kernel space to user space.
18251- *
18252- * Returns number of bytes that could not be copied.
18253- * On success, this will be zero.
18254- */
18255-unsigned long
18256-copy_to_user(void __user *to, const void *from, unsigned long n)
18257+void copy_from_user_overflow(void)
18258 {
18259- if (access_ok(VERIFY_WRITE, to, n))
18260- n = __copy_to_user(to, from, n);
18261- return n;
18262+ WARN(1, "Buffer overflow detected!\n");
18263 }
18264-EXPORT_SYMBOL(copy_to_user);
18265+EXPORT_SYMBOL(copy_from_user_overflow);
18266
18267-/**
18268- * copy_from_user: - Copy a block of data from user space.
18269- * @to: Destination address, in kernel space.
18270- * @from: Source address, in user space.
18271- * @n: Number of bytes to copy.
18272- *
18273- * Context: User context only. This function may sleep.
18274- *
18275- * Copy data from user space to kernel space.
18276- *
18277- * Returns number of bytes that could not be copied.
18278- * On success, this will be zero.
18279- *
18280- * If some data could not be copied, this function will pad the copied
18281- * data to the requested size using zero bytes.
18282- */
18283-unsigned long
18284-_copy_from_user(void *to, const void __user *from, unsigned long n)
18285+void copy_to_user_overflow(void)
18286 {
18287- if (access_ok(VERIFY_READ, from, n))
18288- n = __copy_from_user(to, from, n);
18289- else
18290- memset(to, 0, n);
18291- return n;
18292+ WARN(1, "Buffer overflow detected!\n");
18293 }
18294-EXPORT_SYMBOL(_copy_from_user);
18295+EXPORT_SYMBOL(copy_to_user_overflow);
18296
18297-void copy_from_user_overflow(void)
18298+#ifdef CONFIG_PAX_MEMORY_UDEREF
18299+void __set_fs(mm_segment_t x)
18300 {
18301- WARN(1, "Buffer overflow detected!\n");
18302+ switch (x.seg) {
18303+ case 0:
18304+ loadsegment(gs, 0);
18305+ break;
18306+ case TASK_SIZE_MAX:
18307+ loadsegment(gs, __USER_DS);
18308+ break;
18309+ case -1UL:
18310+ loadsegment(gs, __KERNEL_DS);
18311+ break;
18312+ default:
18313+ BUG();
18314+ }
18315+ return;
18316 }
18317-EXPORT_SYMBOL(copy_from_user_overflow);
18318+EXPORT_SYMBOL(__set_fs);
18319+
18320+void set_fs(mm_segment_t x)
18321+{
18322+ current_thread_info()->addr_limit = x;
18323+ __set_fs(x);
18324+}
18325+EXPORT_SYMBOL(set_fs);
18326+#endif
18327diff -urNp linux-3.0.4/arch/x86/lib/usercopy_64.c linux-3.0.4/arch/x86/lib/usercopy_64.c
18328--- linux-3.0.4/arch/x86/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
18329+++ linux-3.0.4/arch/x86/lib/usercopy_64.c 2011-08-23 21:47:55.000000000 -0400
18330@@ -42,6 +42,12 @@ long
18331 __strncpy_from_user(char *dst, const char __user *src, long count)
18332 {
18333 long res;
18334+
18335+#ifdef CONFIG_PAX_MEMORY_UDEREF
18336+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
18337+ src += PAX_USER_SHADOW_BASE;
18338+#endif
18339+
18340 __do_strncpy_from_user(dst, src, count, res);
18341 return res;
18342 }
18343@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
18344 {
18345 long __d0;
18346 might_fault();
18347+
18348+#ifdef CONFIG_PAX_MEMORY_UDEREF
18349+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
18350+ addr += PAX_USER_SHADOW_BASE;
18351+#endif
18352+
18353 /* no memory constraint because it doesn't change any memory gcc knows
18354 about */
18355 asm volatile(
18356@@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
18357
18358 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
18359 {
18360- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18361+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18362+
18363+#ifdef CONFIG_PAX_MEMORY_UDEREF
18364+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
18365+ to += PAX_USER_SHADOW_BASE;
18366+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
18367+ from += PAX_USER_SHADOW_BASE;
18368+#endif
18369+
18370 return copy_user_generic((__force void *)to, (__force void *)from, len);
18371- }
18372- return len;
18373+ }
18374+ return len;
18375 }
18376 EXPORT_SYMBOL(copy_in_user);
18377
18378diff -urNp linux-3.0.4/arch/x86/Makefile linux-3.0.4/arch/x86/Makefile
18379--- linux-3.0.4/arch/x86/Makefile 2011-07-21 22:17:23.000000000 -0400
18380+++ linux-3.0.4/arch/x86/Makefile 2011-08-23 21:48:14.000000000 -0400
18381@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
18382 else
18383 BITS := 64
18384 UTS_MACHINE := x86_64
18385+ biarch := $(call cc-option,-m64)
18386 CHECKFLAGS += -D__x86_64__ -m64
18387
18388 KBUILD_AFLAGS += -m64
18389@@ -195,3 +196,12 @@ define archhelp
18390 echo ' FDARGS="..." arguments for the booted kernel'
18391 echo ' FDINITRD=file initrd for the booted kernel'
18392 endef
18393+
18394+define OLD_LD
18395+
18396+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
18397+*** Please upgrade your binutils to 2.18 or newer
18398+endef
18399+
18400+archprepare:
18401+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
18402diff -urNp linux-3.0.4/arch/x86/mm/extable.c linux-3.0.4/arch/x86/mm/extable.c
18403--- linux-3.0.4/arch/x86/mm/extable.c 2011-07-21 22:17:23.000000000 -0400
18404+++ linux-3.0.4/arch/x86/mm/extable.c 2011-08-23 21:47:55.000000000 -0400
18405@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs
18406 const struct exception_table_entry *fixup;
18407
18408 #ifdef CONFIG_PNPBIOS
18409- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
18410+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
18411 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
18412 extern u32 pnp_bios_is_utter_crap;
18413 pnp_bios_is_utter_crap = 1;
18414diff -urNp linux-3.0.4/arch/x86/mm/fault.c linux-3.0.4/arch/x86/mm/fault.c
18415--- linux-3.0.4/arch/x86/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
18416+++ linux-3.0.4/arch/x86/mm/fault.c 2011-08-23 21:48:14.000000000 -0400
18417@@ -13,10 +13,18 @@
18418 #include <linux/perf_event.h> /* perf_sw_event */
18419 #include <linux/hugetlb.h> /* hstate_index_to_shift */
18420 #include <linux/prefetch.h> /* prefetchw */
18421+#include <linux/unistd.h>
18422+#include <linux/compiler.h>
18423
18424 #include <asm/traps.h> /* dotraplinkage, ... */
18425 #include <asm/pgalloc.h> /* pgd_*(), ... */
18426 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
18427+#include <asm/vsyscall.h>
18428+#include <asm/tlbflush.h>
18429+
18430+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18431+#include <asm/stacktrace.h>
18432+#endif
18433
18434 /*
18435 * Page fault error code bits:
18436@@ -54,7 +62,7 @@ static inline int __kprobes notify_page_
18437 int ret = 0;
18438
18439 /* kprobe_running() needs smp_processor_id() */
18440- if (kprobes_built_in() && !user_mode_vm(regs)) {
18441+ if (kprobes_built_in() && !user_mode(regs)) {
18442 preempt_disable();
18443 if (kprobe_running() && kprobe_fault_handler(regs, 14))
18444 ret = 1;
18445@@ -115,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *re
18446 return !instr_lo || (instr_lo>>1) == 1;
18447 case 0x00:
18448 /* Prefetch instruction is 0x0F0D or 0x0F18 */
18449- if (probe_kernel_address(instr, opcode))
18450+ if (user_mode(regs)) {
18451+ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18452+ return 0;
18453+ } else if (probe_kernel_address(instr, opcode))
18454 return 0;
18455
18456 *prefetch = (instr_lo == 0xF) &&
18457@@ -149,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsign
18458 while (instr < max_instr) {
18459 unsigned char opcode;
18460
18461- if (probe_kernel_address(instr, opcode))
18462+ if (user_mode(regs)) {
18463+ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18464+ break;
18465+ } else if (probe_kernel_address(instr, opcode))
18466 break;
18467
18468 instr++;
18469@@ -180,6 +194,30 @@ force_sig_info_fault(int si_signo, int s
18470 force_sig_info(si_signo, &info, tsk);
18471 }
18472
18473+#ifdef CONFIG_PAX_EMUTRAMP
18474+static int pax_handle_fetch_fault(struct pt_regs *regs);
18475+#endif
18476+
18477+#ifdef CONFIG_PAX_PAGEEXEC
18478+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
18479+{
18480+ pgd_t *pgd;
18481+ pud_t *pud;
18482+ pmd_t *pmd;
18483+
18484+ pgd = pgd_offset(mm, address);
18485+ if (!pgd_present(*pgd))
18486+ return NULL;
18487+ pud = pud_offset(pgd, address);
18488+ if (!pud_present(*pud))
18489+ return NULL;
18490+ pmd = pmd_offset(pud, address);
18491+ if (!pmd_present(*pmd))
18492+ return NULL;
18493+ return pmd;
18494+}
18495+#endif
18496+
18497 DEFINE_SPINLOCK(pgd_lock);
18498 LIST_HEAD(pgd_list);
18499
18500@@ -230,10 +268,22 @@ void vmalloc_sync_all(void)
18501 for (address = VMALLOC_START & PMD_MASK;
18502 address >= TASK_SIZE && address < FIXADDR_TOP;
18503 address += PMD_SIZE) {
18504+
18505+#ifdef CONFIG_PAX_PER_CPU_PGD
18506+ unsigned long cpu;
18507+#else
18508 struct page *page;
18509+#endif
18510
18511 spin_lock(&pgd_lock);
18512+
18513+#ifdef CONFIG_PAX_PER_CPU_PGD
18514+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
18515+ pgd_t *pgd = get_cpu_pgd(cpu);
18516+ pmd_t *ret;
18517+#else
18518 list_for_each_entry(page, &pgd_list, lru) {
18519+ pgd_t *pgd = page_address(page);
18520 spinlock_t *pgt_lock;
18521 pmd_t *ret;
18522
18523@@ -241,8 +291,13 @@ void vmalloc_sync_all(void)
18524 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
18525
18526 spin_lock(pgt_lock);
18527- ret = vmalloc_sync_one(page_address(page), address);
18528+#endif
18529+
18530+ ret = vmalloc_sync_one(pgd, address);
18531+
18532+#ifndef CONFIG_PAX_PER_CPU_PGD
18533 spin_unlock(pgt_lock);
18534+#endif
18535
18536 if (!ret)
18537 break;
18538@@ -276,6 +331,11 @@ static noinline __kprobes int vmalloc_fa
18539 * an interrupt in the middle of a task switch..
18540 */
18541 pgd_paddr = read_cr3();
18542+
18543+#ifdef CONFIG_PAX_PER_CPU_PGD
18544+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
18545+#endif
18546+
18547 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
18548 if (!pmd_k)
18549 return -1;
18550@@ -371,7 +431,14 @@ static noinline __kprobes int vmalloc_fa
18551 * happen within a race in page table update. In the later
18552 * case just flush:
18553 */
18554+
18555+#ifdef CONFIG_PAX_PER_CPU_PGD
18556+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
18557+ pgd = pgd_offset_cpu(smp_processor_id(), address);
18558+#else
18559 pgd = pgd_offset(current->active_mm, address);
18560+#endif
18561+
18562 pgd_ref = pgd_offset_k(address);
18563 if (pgd_none(*pgd_ref))
18564 return -1;
18565@@ -533,7 +600,7 @@ static int is_errata93(struct pt_regs *r
18566 static int is_errata100(struct pt_regs *regs, unsigned long address)
18567 {
18568 #ifdef CONFIG_X86_64
18569- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
18570+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
18571 return 1;
18572 #endif
18573 return 0;
18574@@ -560,7 +627,7 @@ static int is_f00f_bug(struct pt_regs *r
18575 }
18576
18577 static const char nx_warning[] = KERN_CRIT
18578-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
18579+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
18580
18581 static void
18582 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
18583@@ -569,14 +636,25 @@ show_fault_oops(struct pt_regs *regs, un
18584 if (!oops_may_print())
18585 return;
18586
18587- if (error_code & PF_INSTR) {
18588+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
18589 unsigned int level;
18590
18591 pte_t *pte = lookup_address(address, &level);
18592
18593 if (pte && pte_present(*pte) && !pte_exec(*pte))
18594- printk(nx_warning, current_uid());
18595+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
18596+ }
18597+
18598+#ifdef CONFIG_PAX_KERNEXEC
18599+ if (init_mm.start_code <= address && address < init_mm.end_code) {
18600+ if (current->signal->curr_ip)
18601+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
18602+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
18603+ else
18604+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
18605+ current->comm, task_pid_nr(current), current_uid(), current_euid());
18606 }
18607+#endif
18608
18609 printk(KERN_ALERT "BUG: unable to handle kernel ");
18610 if (address < PAGE_SIZE)
18611@@ -702,6 +780,66 @@ __bad_area_nosemaphore(struct pt_regs *r
18612 unsigned long address, int si_code)
18613 {
18614 struct task_struct *tsk = current;
18615+#if defined(CONFIG_X86_64) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18616+ struct mm_struct *mm = tsk->mm;
18617+#endif
18618+
18619+#ifdef CONFIG_X86_64
18620+ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
18621+ if (regs->ip == VSYSCALL_ADDR(__NR_vgettimeofday) ||
18622+ regs->ip == VSYSCALL_ADDR(__NR_vtime) ||
18623+ regs->ip == VSYSCALL_ADDR(__NR_vgetcpu)) {
18624+ regs->ip += mm->context.vdso - PAGE_SIZE - VSYSCALL_START;
18625+ return;
18626+ }
18627+ }
18628+#endif
18629+
18630+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18631+ if (mm && (error_code & PF_USER)) {
18632+ unsigned long ip = regs->ip;
18633+
18634+ if (v8086_mode(regs))
18635+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
18636+
18637+ /*
18638+ * It's possible to have interrupts off here:
18639+ */
18640+ local_irq_enable();
18641+
18642+#ifdef CONFIG_PAX_PAGEEXEC
18643+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
18644+ (((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
18645+
18646+#ifdef CONFIG_PAX_EMUTRAMP
18647+ switch (pax_handle_fetch_fault(regs)) {
18648+ case 2:
18649+ return;
18650+ }
18651+#endif
18652+
18653+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
18654+ do_group_exit(SIGKILL);
18655+ }
18656+#endif
18657+
18658+#ifdef CONFIG_PAX_SEGMEXEC
18659+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
18660+
18661+#ifdef CONFIG_PAX_EMUTRAMP
18662+ switch (pax_handle_fetch_fault(regs)) {
18663+ case 2:
18664+ return;
18665+ }
18666+#endif
18667+
18668+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
18669+ do_group_exit(SIGKILL);
18670+ }
18671+#endif
18672+
18673+ }
18674+#endif
18675
18676 /* User mode accesses just cause a SIGSEGV */
18677 if (error_code & PF_USER) {
18678@@ -871,6 +1009,99 @@ static int spurious_fault_check(unsigned
18679 return 1;
18680 }
18681
18682+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18683+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
18684+{
18685+ pte_t *pte;
18686+ pmd_t *pmd;
18687+ spinlock_t *ptl;
18688+ unsigned char pte_mask;
18689+
18690+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
18691+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
18692+ return 0;
18693+
18694+ /* PaX: it's our fault, let's handle it if we can */
18695+
18696+ /* PaX: take a look at read faults before acquiring any locks */
18697+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
18698+ /* instruction fetch attempt from a protected page in user mode */
18699+ up_read(&mm->mmap_sem);
18700+
18701+#ifdef CONFIG_PAX_EMUTRAMP
18702+ switch (pax_handle_fetch_fault(regs)) {
18703+ case 2:
18704+ return 1;
18705+ }
18706+#endif
18707+
18708+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
18709+ do_group_exit(SIGKILL);
18710+ }
18711+
18712+ pmd = pax_get_pmd(mm, address);
18713+ if (unlikely(!pmd))
18714+ return 0;
18715+
18716+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
18717+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
18718+ pte_unmap_unlock(pte, ptl);
18719+ return 0;
18720+ }
18721+
18722+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
18723+ /* write attempt to a protected page in user mode */
18724+ pte_unmap_unlock(pte, ptl);
18725+ return 0;
18726+ }
18727+
18728+#ifdef CONFIG_SMP
18729+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
18730+#else
18731+ if (likely(address > get_limit(regs->cs)))
18732+#endif
18733+ {
18734+ set_pte(pte, pte_mkread(*pte));
18735+ __flush_tlb_one(address);
18736+ pte_unmap_unlock(pte, ptl);
18737+ up_read(&mm->mmap_sem);
18738+ return 1;
18739+ }
18740+
18741+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
18742+
18743+ /*
18744+ * PaX: fill DTLB with user rights and retry
18745+ */
18746+ __asm__ __volatile__ (
18747+ "orb %2,(%1)\n"
18748+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
18749+/*
18750+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
18751+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
18752+ * page fault when examined during a TLB load attempt. this is true not only
18753+ * for PTEs holding a non-present entry but also present entries that will
18754+ * raise a page fault (such as those set up by PaX, or the copy-on-write
18755+ * mechanism). in effect it means that we do *not* need to flush the TLBs
18756+ * for our target pages since their PTEs are simply not in the TLBs at all.
18757+
18758+ * the best thing in omitting it is that we gain around 15-20% speed in the
18759+ * fast path of the page fault handler and can get rid of tracing since we
18760+ * can no longer flush unintended entries.
18761+ */
18762+ "invlpg (%0)\n"
18763+#endif
18764+ __copyuser_seg"testb $0,(%0)\n"
18765+ "xorb %3,(%1)\n"
18766+ :
18767+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
18768+ : "memory", "cc");
18769+ pte_unmap_unlock(pte, ptl);
18770+ up_read(&mm->mmap_sem);
18771+ return 1;
18772+}
18773+#endif
18774+
18775 /*
18776 * Handle a spurious fault caused by a stale TLB entry.
18777 *
18778@@ -943,6 +1174,9 @@ int show_unhandled_signals = 1;
18779 static inline int
18780 access_error(unsigned long error_code, struct vm_area_struct *vma)
18781 {
18782+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
18783+ return 1;
18784+
18785 if (error_code & PF_WRITE) {
18786 /* write, present and write, not present: */
18787 if (unlikely(!(vma->vm_flags & VM_WRITE)))
18788@@ -976,19 +1210,33 @@ do_page_fault(struct pt_regs *regs, unsi
18789 {
18790 struct vm_area_struct *vma;
18791 struct task_struct *tsk;
18792- unsigned long address;
18793 struct mm_struct *mm;
18794 int fault;
18795 int write = error_code & PF_WRITE;
18796 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
18797 (write ? FAULT_FLAG_WRITE : 0);
18798
18799+ /* Get the faulting address: */
18800+ unsigned long address = read_cr2();
18801+
18802+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18803+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
18804+ if (!search_exception_tables(regs->ip)) {
18805+ bad_area_nosemaphore(regs, error_code, address);
18806+ return;
18807+ }
18808+ if (address < PAX_USER_SHADOW_BASE) {
18809+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
18810+ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
18811+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
18812+ } else
18813+ address -= PAX_USER_SHADOW_BASE;
18814+ }
18815+#endif
18816+
18817 tsk = current;
18818 mm = tsk->mm;
18819
18820- /* Get the faulting address: */
18821- address = read_cr2();
18822-
18823 /*
18824 * Detect and handle instructions that would cause a page fault for
18825 * both a tracked kernel page and a userspace page.
18826@@ -1048,7 +1296,7 @@ do_page_fault(struct pt_regs *regs, unsi
18827 * User-mode registers count as a user access even for any
18828 * potential system fault or CPU buglet:
18829 */
18830- if (user_mode_vm(regs)) {
18831+ if (user_mode(regs)) {
18832 local_irq_enable();
18833 error_code |= PF_USER;
18834 } else {
18835@@ -1103,6 +1351,11 @@ retry:
18836 might_sleep();
18837 }
18838
18839+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18840+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
18841+ return;
18842+#endif
18843+
18844 vma = find_vma(mm, address);
18845 if (unlikely(!vma)) {
18846 bad_area(regs, error_code, address);
18847@@ -1114,18 +1367,24 @@ retry:
18848 bad_area(regs, error_code, address);
18849 return;
18850 }
18851- if (error_code & PF_USER) {
18852- /*
18853- * Accessing the stack below %sp is always a bug.
18854- * The large cushion allows instructions like enter
18855- * and pusha to work. ("enter $65535, $31" pushes
18856- * 32 pointers and then decrements %sp by 65535.)
18857- */
18858- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
18859- bad_area(regs, error_code, address);
18860- return;
18861- }
18862+ /*
18863+ * Accessing the stack below %sp is always a bug.
18864+ * The large cushion allows instructions like enter
18865+ * and pusha to work. ("enter $65535, $31" pushes
18866+ * 32 pointers and then decrements %sp by 65535.)
18867+ */
18868+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
18869+ bad_area(regs, error_code, address);
18870+ return;
18871 }
18872+
18873+#ifdef CONFIG_PAX_SEGMEXEC
18874+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
18875+ bad_area(regs, error_code, address);
18876+ return;
18877+ }
18878+#endif
18879+
18880 if (unlikely(expand_stack(vma, address))) {
18881 bad_area(regs, error_code, address);
18882 return;
18883@@ -1180,3 +1439,199 @@ good_area:
18884
18885 up_read(&mm->mmap_sem);
18886 }
18887+
18888+#ifdef CONFIG_PAX_EMUTRAMP
18889+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
18890+{
18891+ int err;
18892+
18893+ do { /* PaX: gcc trampoline emulation #1 */
18894+ unsigned char mov1, mov2;
18895+ unsigned short jmp;
18896+ unsigned int addr1, addr2;
18897+
18898+#ifdef CONFIG_X86_64
18899+ if ((regs->ip + 11) >> 32)
18900+ break;
18901+#endif
18902+
18903+ err = get_user(mov1, (unsigned char __user *)regs->ip);
18904+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
18905+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
18906+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
18907+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
18908+
18909+ if (err)
18910+ break;
18911+
18912+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
18913+ regs->cx = addr1;
18914+ regs->ax = addr2;
18915+ regs->ip = addr2;
18916+ return 2;
18917+ }
18918+ } while (0);
18919+
18920+ do { /* PaX: gcc trampoline emulation #2 */
18921+ unsigned char mov, jmp;
18922+ unsigned int addr1, addr2;
18923+
18924+#ifdef CONFIG_X86_64
18925+ if ((regs->ip + 9) >> 32)
18926+ break;
18927+#endif
18928+
18929+ err = get_user(mov, (unsigned char __user *)regs->ip);
18930+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
18931+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
18932+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
18933+
18934+ if (err)
18935+ break;
18936+
18937+ if (mov == 0xB9 && jmp == 0xE9) {
18938+ regs->cx = addr1;
18939+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
18940+ return 2;
18941+ }
18942+ } while (0);
18943+
18944+ return 1; /* PaX in action */
18945+}
18946+
18947+#ifdef CONFIG_X86_64
18948+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
18949+{
18950+ int err;
18951+
18952+ do { /* PaX: gcc trampoline emulation #1 */
18953+ unsigned short mov1, mov2, jmp1;
18954+ unsigned char jmp2;
18955+ unsigned int addr1;
18956+ unsigned long addr2;
18957+
18958+ err = get_user(mov1, (unsigned short __user *)regs->ip);
18959+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
18960+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
18961+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
18962+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
18963+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
18964+
18965+ if (err)
18966+ break;
18967+
18968+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
18969+ regs->r11 = addr1;
18970+ regs->r10 = addr2;
18971+ regs->ip = addr1;
18972+ return 2;
18973+ }
18974+ } while (0);
18975+
18976+ do { /* PaX: gcc trampoline emulation #2 */
18977+ unsigned short mov1, mov2, jmp1;
18978+ unsigned char jmp2;
18979+ unsigned long addr1, addr2;
18980+
18981+ err = get_user(mov1, (unsigned short __user *)regs->ip);
18982+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
18983+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
18984+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
18985+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
18986+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
18987+
18988+ if (err)
18989+ break;
18990+
18991+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
18992+ regs->r11 = addr1;
18993+ regs->r10 = addr2;
18994+ regs->ip = addr1;
18995+ return 2;
18996+ }
18997+ } while (0);
18998+
18999+ return 1; /* PaX in action */
19000+}
19001+#endif
19002+
19003+/*
19004+ * PaX: decide what to do with offenders (regs->ip = fault address)
19005+ *
19006+ * returns 1 when task should be killed
19007+ * 2 when gcc trampoline was detected
19008+ */
19009+static int pax_handle_fetch_fault(struct pt_regs *regs)
19010+{
19011+ if (v8086_mode(regs))
19012+ return 1;
19013+
19014+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
19015+ return 1;
19016+
19017+#ifdef CONFIG_X86_32
19018+ return pax_handle_fetch_fault_32(regs);
19019+#else
19020+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
19021+ return pax_handle_fetch_fault_32(regs);
19022+ else
19023+ return pax_handle_fetch_fault_64(regs);
19024+#endif
19025+}
19026+#endif
19027+
19028+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19029+void pax_report_insns(void *pc, void *sp)
19030+{
19031+ long i;
19032+
19033+ printk(KERN_ERR "PAX: bytes at PC: ");
19034+ for (i = 0; i < 20; i++) {
19035+ unsigned char c;
19036+ if (get_user(c, (__force unsigned char __user *)pc+i))
19037+ printk(KERN_CONT "?? ");
19038+ else
19039+ printk(KERN_CONT "%02x ", c);
19040+ }
19041+ printk("\n");
19042+
19043+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
19044+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
19045+ unsigned long c;
19046+ if (get_user(c, (__force unsigned long __user *)sp+i))
19047+#ifdef CONFIG_X86_32
19048+ printk(KERN_CONT "???????? ");
19049+#else
19050+ printk(KERN_CONT "???????????????? ");
19051+#endif
19052+ else
19053+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
19054+ }
19055+ printk("\n");
19056+}
19057+#endif
19058+
19059+/**
19060+ * probe_kernel_write(): safely attempt to write to a location
19061+ * @dst: address to write to
19062+ * @src: pointer to the data that shall be written
19063+ * @size: size of the data chunk
19064+ *
19065+ * Safely write to address @dst from the buffer at @src. If a kernel fault
19066+ * happens, handle that and return -EFAULT.
19067+ */
19068+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
19069+{
19070+ long ret;
19071+ mm_segment_t old_fs = get_fs();
19072+
19073+ set_fs(KERNEL_DS);
19074+ pagefault_disable();
19075+ pax_open_kernel();
19076+ ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
19077+ pax_close_kernel();
19078+ pagefault_enable();
19079+ set_fs(old_fs);
19080+
19081+ return ret ? -EFAULT : 0;
19082+}
19083diff -urNp linux-3.0.4/arch/x86/mm/gup.c linux-3.0.4/arch/x86/mm/gup.c
19084--- linux-3.0.4/arch/x86/mm/gup.c 2011-07-21 22:17:23.000000000 -0400
19085+++ linux-3.0.4/arch/x86/mm/gup.c 2011-08-23 21:47:55.000000000 -0400
19086@@ -263,7 +263,7 @@ int __get_user_pages_fast(unsigned long
19087 addr = start;
19088 len = (unsigned long) nr_pages << PAGE_SHIFT;
19089 end = start + len;
19090- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19091+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19092 (void __user *)start, len)))
19093 return 0;
19094
19095diff -urNp linux-3.0.4/arch/x86/mm/highmem_32.c linux-3.0.4/arch/x86/mm/highmem_32.c
19096--- linux-3.0.4/arch/x86/mm/highmem_32.c 2011-07-21 22:17:23.000000000 -0400
19097+++ linux-3.0.4/arch/x86/mm/highmem_32.c 2011-08-23 21:47:55.000000000 -0400
19098@@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page
19099 idx = type + KM_TYPE_NR*smp_processor_id();
19100 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19101 BUG_ON(!pte_none(*(kmap_pte-idx)));
19102+
19103+ pax_open_kernel();
19104 set_pte(kmap_pte-idx, mk_pte(page, prot));
19105+ pax_close_kernel();
19106
19107 return (void *)vaddr;
19108 }
19109diff -urNp linux-3.0.4/arch/x86/mm/hugetlbpage.c linux-3.0.4/arch/x86/mm/hugetlbpage.c
19110--- linux-3.0.4/arch/x86/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
19111+++ linux-3.0.4/arch/x86/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
19112@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe
19113 struct hstate *h = hstate_file(file);
19114 struct mm_struct *mm = current->mm;
19115 struct vm_area_struct *vma;
19116- unsigned long start_addr;
19117+ unsigned long start_addr, pax_task_size = TASK_SIZE;
19118+
19119+#ifdef CONFIG_PAX_SEGMEXEC
19120+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19121+ pax_task_size = SEGMEXEC_TASK_SIZE;
19122+#endif
19123+
19124+ pax_task_size -= PAGE_SIZE;
19125
19126 if (len > mm->cached_hole_size) {
19127- start_addr = mm->free_area_cache;
19128+ start_addr = mm->free_area_cache;
19129 } else {
19130- start_addr = TASK_UNMAPPED_BASE;
19131- mm->cached_hole_size = 0;
19132+ start_addr = mm->mmap_base;
19133+ mm->cached_hole_size = 0;
19134 }
19135
19136 full_search:
19137@@ -280,26 +287,27 @@ full_search:
19138
19139 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19140 /* At this point: (!vma || addr < vma->vm_end). */
19141- if (TASK_SIZE - len < addr) {
19142+ if (pax_task_size - len < addr) {
19143 /*
19144 * Start a new search - just in case we missed
19145 * some holes.
19146 */
19147- if (start_addr != TASK_UNMAPPED_BASE) {
19148- start_addr = TASK_UNMAPPED_BASE;
19149+ if (start_addr != mm->mmap_base) {
19150+ start_addr = mm->mmap_base;
19151 mm->cached_hole_size = 0;
19152 goto full_search;
19153 }
19154 return -ENOMEM;
19155 }
19156- if (!vma || addr + len <= vma->vm_start) {
19157- mm->free_area_cache = addr + len;
19158- return addr;
19159- }
19160+ if (check_heap_stack_gap(vma, addr, len))
19161+ break;
19162 if (addr + mm->cached_hole_size < vma->vm_start)
19163 mm->cached_hole_size = vma->vm_start - addr;
19164 addr = ALIGN(vma->vm_end, huge_page_size(h));
19165 }
19166+
19167+ mm->free_area_cache = addr + len;
19168+ return addr;
19169 }
19170
19171 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
19172@@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmappe
19173 {
19174 struct hstate *h = hstate_file(file);
19175 struct mm_struct *mm = current->mm;
19176- struct vm_area_struct *vma, *prev_vma;
19177- unsigned long base = mm->mmap_base, addr = addr0;
19178+ struct vm_area_struct *vma;
19179+ unsigned long base = mm->mmap_base, addr;
19180 unsigned long largest_hole = mm->cached_hole_size;
19181- int first_time = 1;
19182
19183 /* don't allow allocations above current base */
19184 if (mm->free_area_cache > base)
19185@@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmappe
19186 largest_hole = 0;
19187 mm->free_area_cache = base;
19188 }
19189-try_again:
19190+
19191 /* make sure it can fit in the remaining address space */
19192 if (mm->free_area_cache < len)
19193 goto fail;
19194
19195 /* either no address requested or can't fit in requested address hole */
19196- addr = (mm->free_area_cache - len) & huge_page_mask(h);
19197+ addr = (mm->free_area_cache - len);
19198 do {
19199+ addr &= huge_page_mask(h);
19200+ vma = find_vma(mm, addr);
19201 /*
19202 * Lookup failure means no vma is above this address,
19203 * i.e. return with success:
19204- */
19205- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
19206- return addr;
19207-
19208- /*
19209 * new region fits between prev_vma->vm_end and
19210 * vma->vm_start, use it:
19211 */
19212- if (addr + len <= vma->vm_start &&
19213- (!prev_vma || (addr >= prev_vma->vm_end))) {
19214+ if (check_heap_stack_gap(vma, addr, len)) {
19215 /* remember the address as a hint for next time */
19216- mm->cached_hole_size = largest_hole;
19217- return (mm->free_area_cache = addr);
19218- } else {
19219- /* pull free_area_cache down to the first hole */
19220- if (mm->free_area_cache == vma->vm_end) {
19221- mm->free_area_cache = vma->vm_start;
19222- mm->cached_hole_size = largest_hole;
19223- }
19224+ mm->cached_hole_size = largest_hole;
19225+ return (mm->free_area_cache = addr);
19226+ }
19227+ /* pull free_area_cache down to the first hole */
19228+ if (mm->free_area_cache == vma->vm_end) {
19229+ mm->free_area_cache = vma->vm_start;
19230+ mm->cached_hole_size = largest_hole;
19231 }
19232
19233 /* remember the largest hole we saw so far */
19234 if (addr + largest_hole < vma->vm_start)
19235- largest_hole = vma->vm_start - addr;
19236+ largest_hole = vma->vm_start - addr;
19237
19238 /* try just below the current vma->vm_start */
19239- addr = (vma->vm_start - len) & huge_page_mask(h);
19240- } while (len <= vma->vm_start);
19241+ addr = skip_heap_stack_gap(vma, len);
19242+ } while (!IS_ERR_VALUE(addr));
19243
19244 fail:
19245 /*
19246- * if hint left us with no space for the requested
19247- * mapping then try again:
19248- */
19249- if (first_time) {
19250- mm->free_area_cache = base;
19251- largest_hole = 0;
19252- first_time = 0;
19253- goto try_again;
19254- }
19255- /*
19256 * A failed mmap() very likely causes application failure,
19257 * so fall back to the bottom-up function here. This scenario
19258 * can happen with large stack limits and large mmap()
19259 * allocations.
19260 */
19261- mm->free_area_cache = TASK_UNMAPPED_BASE;
19262+
19263+#ifdef CONFIG_PAX_SEGMEXEC
19264+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19265+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19266+ else
19267+#endif
19268+
19269+ mm->mmap_base = TASK_UNMAPPED_BASE;
19270+
19271+#ifdef CONFIG_PAX_RANDMMAP
19272+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19273+ mm->mmap_base += mm->delta_mmap;
19274+#endif
19275+
19276+ mm->free_area_cache = mm->mmap_base;
19277 mm->cached_hole_size = ~0UL;
19278 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
19279 len, pgoff, flags);
19280@@ -386,6 +392,7 @@ fail:
19281 /*
19282 * Restore the topdown base:
19283 */
19284+ mm->mmap_base = base;
19285 mm->free_area_cache = base;
19286 mm->cached_hole_size = ~0UL;
19287
19288@@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *f
19289 struct hstate *h = hstate_file(file);
19290 struct mm_struct *mm = current->mm;
19291 struct vm_area_struct *vma;
19292+ unsigned long pax_task_size = TASK_SIZE;
19293
19294 if (len & ~huge_page_mask(h))
19295 return -EINVAL;
19296- if (len > TASK_SIZE)
19297+
19298+#ifdef CONFIG_PAX_SEGMEXEC
19299+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19300+ pax_task_size = SEGMEXEC_TASK_SIZE;
19301+#endif
19302+
19303+ pax_task_size -= PAGE_SIZE;
19304+
19305+ if (len > pax_task_size)
19306 return -ENOMEM;
19307
19308 if (flags & MAP_FIXED) {
19309@@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *f
19310 if (addr) {
19311 addr = ALIGN(addr, huge_page_size(h));
19312 vma = find_vma(mm, addr);
19313- if (TASK_SIZE - len >= addr &&
19314- (!vma || addr + len <= vma->vm_start))
19315+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
19316 return addr;
19317 }
19318 if (mm->get_unmapped_area == arch_get_unmapped_area)
19319diff -urNp linux-3.0.4/arch/x86/mm/init_32.c linux-3.0.4/arch/x86/mm/init_32.c
19320--- linux-3.0.4/arch/x86/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
19321+++ linux-3.0.4/arch/x86/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
19322@@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
19323 }
19324
19325 /*
19326- * Creates a middle page table and puts a pointer to it in the
19327- * given global directory entry. This only returns the gd entry
19328- * in non-PAE compilation mode, since the middle layer is folded.
19329- */
19330-static pmd_t * __init one_md_table_init(pgd_t *pgd)
19331-{
19332- pud_t *pud;
19333- pmd_t *pmd_table;
19334-
19335-#ifdef CONFIG_X86_PAE
19336- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
19337- if (after_bootmem)
19338- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
19339- else
19340- pmd_table = (pmd_t *)alloc_low_page();
19341- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
19342- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
19343- pud = pud_offset(pgd, 0);
19344- BUG_ON(pmd_table != pmd_offset(pud, 0));
19345-
19346- return pmd_table;
19347- }
19348-#endif
19349- pud = pud_offset(pgd, 0);
19350- pmd_table = pmd_offset(pud, 0);
19351-
19352- return pmd_table;
19353-}
19354-
19355-/*
19356 * Create a page table and place a pointer to it in a middle page
19357 * directory entry:
19358 */
19359@@ -123,13 +93,28 @@ static pte_t * __init one_page_table_ini
19360 page_table = (pte_t *)alloc_low_page();
19361
19362 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
19363+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19364+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
19365+#else
19366 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
19367+#endif
19368 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
19369 }
19370
19371 return pte_offset_kernel(pmd, 0);
19372 }
19373
19374+static pmd_t * __init one_md_table_init(pgd_t *pgd)
19375+{
19376+ pud_t *pud;
19377+ pmd_t *pmd_table;
19378+
19379+ pud = pud_offset(pgd, 0);
19380+ pmd_table = pmd_offset(pud, 0);
19381+
19382+ return pmd_table;
19383+}
19384+
19385 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
19386 {
19387 int pgd_idx = pgd_index(vaddr);
19388@@ -203,6 +188,7 @@ page_table_range_init(unsigned long star
19389 int pgd_idx, pmd_idx;
19390 unsigned long vaddr;
19391 pgd_t *pgd;
19392+ pud_t *pud;
19393 pmd_t *pmd;
19394 pte_t *pte = NULL;
19395
19396@@ -212,8 +198,13 @@ page_table_range_init(unsigned long star
19397 pgd = pgd_base + pgd_idx;
19398
19399 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
19400- pmd = one_md_table_init(pgd);
19401- pmd = pmd + pmd_index(vaddr);
19402+ pud = pud_offset(pgd, vaddr);
19403+ pmd = pmd_offset(pud, vaddr);
19404+
19405+#ifdef CONFIG_X86_PAE
19406+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19407+#endif
19408+
19409 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
19410 pmd++, pmd_idx++) {
19411 pte = page_table_kmap_check(one_page_table_init(pmd),
19412@@ -225,11 +216,20 @@ page_table_range_init(unsigned long star
19413 }
19414 }
19415
19416-static inline int is_kernel_text(unsigned long addr)
19417+static inline int is_kernel_text(unsigned long start, unsigned long end)
19418 {
19419- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
19420- return 1;
19421- return 0;
19422+ if ((start > ktla_ktva((unsigned long)_etext) ||
19423+ end <= ktla_ktva((unsigned long)_stext)) &&
19424+ (start > ktla_ktva((unsigned long)_einittext) ||
19425+ end <= ktla_ktva((unsigned long)_sinittext)) &&
19426+
19427+#ifdef CONFIG_ACPI_SLEEP
19428+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
19429+#endif
19430+
19431+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
19432+ return 0;
19433+ return 1;
19434 }
19435
19436 /*
19437@@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned lo
19438 unsigned long last_map_addr = end;
19439 unsigned long start_pfn, end_pfn;
19440 pgd_t *pgd_base = swapper_pg_dir;
19441- int pgd_idx, pmd_idx, pte_ofs;
19442+ unsigned int pgd_idx, pmd_idx, pte_ofs;
19443 unsigned long pfn;
19444 pgd_t *pgd;
19445+ pud_t *pud;
19446 pmd_t *pmd;
19447 pte_t *pte;
19448 unsigned pages_2m, pages_4k;
19449@@ -281,8 +282,13 @@ repeat:
19450 pfn = start_pfn;
19451 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19452 pgd = pgd_base + pgd_idx;
19453- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
19454- pmd = one_md_table_init(pgd);
19455+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
19456+ pud = pud_offset(pgd, 0);
19457+ pmd = pmd_offset(pud, 0);
19458+
19459+#ifdef CONFIG_X86_PAE
19460+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19461+#endif
19462
19463 if (pfn >= end_pfn)
19464 continue;
19465@@ -294,14 +300,13 @@ repeat:
19466 #endif
19467 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
19468 pmd++, pmd_idx++) {
19469- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
19470+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
19471
19472 /*
19473 * Map with big pages if possible, otherwise
19474 * create normal page tables:
19475 */
19476 if (use_pse) {
19477- unsigned int addr2;
19478 pgprot_t prot = PAGE_KERNEL_LARGE;
19479 /*
19480 * first pass will use the same initial
19481@@ -311,11 +316,7 @@ repeat:
19482 __pgprot(PTE_IDENT_ATTR |
19483 _PAGE_PSE);
19484
19485- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
19486- PAGE_OFFSET + PAGE_SIZE-1;
19487-
19488- if (is_kernel_text(addr) ||
19489- is_kernel_text(addr2))
19490+ if (is_kernel_text(address, address + PMD_SIZE))
19491 prot = PAGE_KERNEL_LARGE_EXEC;
19492
19493 pages_2m++;
19494@@ -332,7 +333,7 @@ repeat:
19495 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19496 pte += pte_ofs;
19497 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
19498- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
19499+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
19500 pgprot_t prot = PAGE_KERNEL;
19501 /*
19502 * first pass will use the same initial
19503@@ -340,7 +341,7 @@ repeat:
19504 */
19505 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
19506
19507- if (is_kernel_text(addr))
19508+ if (is_kernel_text(address, address + PAGE_SIZE))
19509 prot = PAGE_KERNEL_EXEC;
19510
19511 pages_4k++;
19512@@ -472,7 +473,7 @@ void __init native_pagetable_setup_start
19513
19514 pud = pud_offset(pgd, va);
19515 pmd = pmd_offset(pud, va);
19516- if (!pmd_present(*pmd))
19517+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
19518 break;
19519
19520 pte = pte_offset_kernel(pmd, va);
19521@@ -524,12 +525,10 @@ void __init early_ioremap_page_table_ran
19522
19523 static void __init pagetable_init(void)
19524 {
19525- pgd_t *pgd_base = swapper_pg_dir;
19526-
19527- permanent_kmaps_init(pgd_base);
19528+ permanent_kmaps_init(swapper_pg_dir);
19529 }
19530
19531-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19532+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19533 EXPORT_SYMBOL_GPL(__supported_pte_mask);
19534
19535 /* user-defined highmem size */
19536@@ -757,6 +756,12 @@ void __init mem_init(void)
19537
19538 pci_iommu_alloc();
19539
19540+#ifdef CONFIG_PAX_PER_CPU_PGD
19541+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
19542+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19543+ KERNEL_PGD_PTRS);
19544+#endif
19545+
19546 #ifdef CONFIG_FLATMEM
19547 BUG_ON(!mem_map);
19548 #endif
19549@@ -774,7 +779,7 @@ void __init mem_init(void)
19550 set_highmem_pages_init();
19551
19552 codesize = (unsigned long) &_etext - (unsigned long) &_text;
19553- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
19554+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
19555 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
19556
19557 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
19558@@ -815,10 +820,10 @@ void __init mem_init(void)
19559 ((unsigned long)&__init_end -
19560 (unsigned long)&__init_begin) >> 10,
19561
19562- (unsigned long)&_etext, (unsigned long)&_edata,
19563- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
19564+ (unsigned long)&_sdata, (unsigned long)&_edata,
19565+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
19566
19567- (unsigned long)&_text, (unsigned long)&_etext,
19568+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
19569 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
19570
19571 /*
19572@@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
19573 if (!kernel_set_to_readonly)
19574 return;
19575
19576+ start = ktla_ktva(start);
19577 pr_debug("Set kernel text: %lx - %lx for read write\n",
19578 start, start+size);
19579
19580@@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
19581 if (!kernel_set_to_readonly)
19582 return;
19583
19584+ start = ktla_ktva(start);
19585 pr_debug("Set kernel text: %lx - %lx for read only\n",
19586 start, start+size);
19587
19588@@ -938,6 +945,7 @@ void mark_rodata_ro(void)
19589 unsigned long start = PFN_ALIGN(_text);
19590 unsigned long size = PFN_ALIGN(_etext) - start;
19591
19592+ start = ktla_ktva(start);
19593 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
19594 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
19595 size >> 10);
19596diff -urNp linux-3.0.4/arch/x86/mm/init_64.c linux-3.0.4/arch/x86/mm/init_64.c
19597--- linux-3.0.4/arch/x86/mm/init_64.c 2011-07-21 22:17:23.000000000 -0400
19598+++ linux-3.0.4/arch/x86/mm/init_64.c 2011-08-23 21:47:55.000000000 -0400
19599@@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpa
19600 * around without checking the pgd every time.
19601 */
19602
19603-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
19604+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
19605 EXPORT_SYMBOL_GPL(__supported_pte_mask);
19606
19607 int force_personality32;
19608@@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long star
19609
19610 for (address = start; address <= end; address += PGDIR_SIZE) {
19611 const pgd_t *pgd_ref = pgd_offset_k(address);
19612+
19613+#ifdef CONFIG_PAX_PER_CPU_PGD
19614+ unsigned long cpu;
19615+#else
19616 struct page *page;
19617+#endif
19618
19619 if (pgd_none(*pgd_ref))
19620 continue;
19621
19622 spin_lock(&pgd_lock);
19623+
19624+#ifdef CONFIG_PAX_PER_CPU_PGD
19625+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19626+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
19627+#else
19628 list_for_each_entry(page, &pgd_list, lru) {
19629 pgd_t *pgd;
19630 spinlock_t *pgt_lock;
19631@@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long star
19632 /* the pgt_lock only for Xen */
19633 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
19634 spin_lock(pgt_lock);
19635+#endif
19636
19637 if (pgd_none(*pgd))
19638 set_pgd(pgd, *pgd_ref);
19639@@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long star
19640 BUG_ON(pgd_page_vaddr(*pgd)
19641 != pgd_page_vaddr(*pgd_ref));
19642
19643+#ifndef CONFIG_PAX_PER_CPU_PGD
19644 spin_unlock(pgt_lock);
19645+#endif
19646+
19647 }
19648 spin_unlock(&pgd_lock);
19649 }
19650@@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
19651 pmd = fill_pmd(pud, vaddr);
19652 pte = fill_pte(pmd, vaddr);
19653
19654+ pax_open_kernel();
19655 set_pte(pte, new_pte);
19656+ pax_close_kernel();
19657
19658 /*
19659 * It's enough to flush this one mapping.
19660@@ -262,14 +278,12 @@ static void __init __init_extra_mapping(
19661 pgd = pgd_offset_k((unsigned long)__va(phys));
19662 if (pgd_none(*pgd)) {
19663 pud = (pud_t *) spp_getpage();
19664- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
19665- _PAGE_USER));
19666+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
19667 }
19668 pud = pud_offset(pgd, (unsigned long)__va(phys));
19669 if (pud_none(*pud)) {
19670 pmd = (pmd_t *) spp_getpage();
19671- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
19672- _PAGE_USER));
19673+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
19674 }
19675 pmd = pmd_offset(pud, phys);
19676 BUG_ON(!pmd_none(*pmd));
19677@@ -693,6 +707,12 @@ void __init mem_init(void)
19678
19679 pci_iommu_alloc();
19680
19681+#ifdef CONFIG_PAX_PER_CPU_PGD
19682+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
19683+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19684+ KERNEL_PGD_PTRS);
19685+#endif
19686+
19687 /* clear_bss() already clear the empty_zero_page */
19688
19689 reservedpages = 0;
19690@@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
19691 static struct vm_area_struct gate_vma = {
19692 .vm_start = VSYSCALL_START,
19693 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
19694- .vm_page_prot = PAGE_READONLY_EXEC,
19695- .vm_flags = VM_READ | VM_EXEC
19696+ .vm_page_prot = PAGE_READONLY,
19697+ .vm_flags = VM_READ
19698 };
19699
19700 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
19701@@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long add
19702
19703 const char *arch_vma_name(struct vm_area_struct *vma)
19704 {
19705- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
19706+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
19707 return "[vdso]";
19708 if (vma == &gate_vma)
19709 return "[vsyscall]";
19710diff -urNp linux-3.0.4/arch/x86/mm/init.c linux-3.0.4/arch/x86/mm/init.c
19711--- linux-3.0.4/arch/x86/mm/init.c 2011-07-21 22:17:23.000000000 -0400
19712+++ linux-3.0.4/arch/x86/mm/init.c 2011-08-23 21:48:14.000000000 -0400
19713@@ -31,7 +31,7 @@ int direct_gbpages
19714 static void __init find_early_table_space(unsigned long end, int use_pse,
19715 int use_gbpages)
19716 {
19717- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
19718+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
19719 phys_addr_t base;
19720
19721 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
19722@@ -313,12 +313,34 @@ unsigned long __init_refok init_memory_m
19723 */
19724 int devmem_is_allowed(unsigned long pagenr)
19725 {
19726- if (pagenr <= 256)
19727+#ifdef CONFIG_GRKERNSEC_KMEM
19728+ /* allow BDA */
19729+ if (!pagenr)
19730+ return 1;
19731+ /* allow EBDA */
19732+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
19733+ return 1;
19734+#else
19735+ if (!pagenr)
19736+ return 1;
19737+#ifdef CONFIG_VM86
19738+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
19739+ return 1;
19740+#endif
19741+#endif
19742+
19743+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
19744 return 1;
19745+#ifdef CONFIG_GRKERNSEC_KMEM
19746+ /* throw out everything else below 1MB */
19747+ if (pagenr <= 256)
19748+ return 0;
19749+#endif
19750 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
19751 return 0;
19752 if (!page_is_ram(pagenr))
19753 return 1;
19754+
19755 return 0;
19756 }
19757
19758@@ -373,6 +395,86 @@ void free_init_pages(char *what, unsigne
19759
19760 void free_initmem(void)
19761 {
19762+
19763+#ifdef CONFIG_PAX_KERNEXEC
19764+#ifdef CONFIG_X86_32
19765+ /* PaX: limit KERNEL_CS to actual size */
19766+ unsigned long addr, limit;
19767+ struct desc_struct d;
19768+ int cpu;
19769+
19770+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
19771+ limit = (limit - 1UL) >> PAGE_SHIFT;
19772+
19773+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
19774+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
19775+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
19776+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
19777+ }
19778+
19779+ /* PaX: make KERNEL_CS read-only */
19780+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
19781+ if (!paravirt_enabled())
19782+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
19783+/*
19784+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
19785+ pgd = pgd_offset_k(addr);
19786+ pud = pud_offset(pgd, addr);
19787+ pmd = pmd_offset(pud, addr);
19788+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19789+ }
19790+*/
19791+#ifdef CONFIG_X86_PAE
19792+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
19793+/*
19794+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
19795+ pgd = pgd_offset_k(addr);
19796+ pud = pud_offset(pgd, addr);
19797+ pmd = pmd_offset(pud, addr);
19798+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
19799+ }
19800+*/
19801+#endif
19802+
19803+#ifdef CONFIG_MODULES
19804+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
19805+#endif
19806+
19807+#else
19808+ pgd_t *pgd;
19809+ pud_t *pud;
19810+ pmd_t *pmd;
19811+ unsigned long addr, end;
19812+
19813+ /* PaX: make kernel code/rodata read-only, rest non-executable */
19814+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
19815+ pgd = pgd_offset_k(addr);
19816+ pud = pud_offset(pgd, addr);
19817+ pmd = pmd_offset(pud, addr);
19818+ if (!pmd_present(*pmd))
19819+ continue;
19820+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
19821+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19822+ else
19823+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
19824+ }
19825+
19826+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
19827+ end = addr + KERNEL_IMAGE_SIZE;
19828+ for (; addr < end; addr += PMD_SIZE) {
19829+ pgd = pgd_offset_k(addr);
19830+ pud = pud_offset(pgd, addr);
19831+ pmd = pmd_offset(pud, addr);
19832+ if (!pmd_present(*pmd))
19833+ continue;
19834+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
19835+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19836+ }
19837+#endif
19838+
19839+ flush_tlb_all();
19840+#endif
19841+
19842 free_init_pages("unused kernel memory",
19843 (unsigned long)(&__init_begin),
19844 (unsigned long)(&__init_end));
19845diff -urNp linux-3.0.4/arch/x86/mm/iomap_32.c linux-3.0.4/arch/x86/mm/iomap_32.c
19846--- linux-3.0.4/arch/x86/mm/iomap_32.c 2011-07-21 22:17:23.000000000 -0400
19847+++ linux-3.0.4/arch/x86/mm/iomap_32.c 2011-08-23 21:47:55.000000000 -0400
19848@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
19849 type = kmap_atomic_idx_push();
19850 idx = type + KM_TYPE_NR * smp_processor_id();
19851 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19852+
19853+ pax_open_kernel();
19854 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
19855+ pax_close_kernel();
19856+
19857 arch_flush_lazy_mmu_mode();
19858
19859 return (void *)vaddr;
19860diff -urNp linux-3.0.4/arch/x86/mm/ioremap.c linux-3.0.4/arch/x86/mm/ioremap.c
19861--- linux-3.0.4/arch/x86/mm/ioremap.c 2011-07-21 22:17:23.000000000 -0400
19862+++ linux-3.0.4/arch/x86/mm/ioremap.c 2011-08-23 21:47:55.000000000 -0400
19863@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(re
19864 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
19865 int is_ram = page_is_ram(pfn);
19866
19867- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
19868+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
19869 return NULL;
19870 WARN_ON_ONCE(is_ram);
19871 }
19872@@ -344,7 +344,7 @@ static int __init early_ioremap_debug_se
19873 early_param("early_ioremap_debug", early_ioremap_debug_setup);
19874
19875 static __initdata int after_paging_init;
19876-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
19877+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
19878
19879 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
19880 {
19881@@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
19882 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
19883
19884 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
19885- memset(bm_pte, 0, sizeof(bm_pte));
19886- pmd_populate_kernel(&init_mm, pmd, bm_pte);
19887+ pmd_populate_user(&init_mm, pmd, bm_pte);
19888
19889 /*
19890 * The boot-ioremap range spans multiple pmds, for which
19891diff -urNp linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c
19892--- linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c 2011-07-21 22:17:23.000000000 -0400
19893+++ linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c 2011-08-23 21:47:55.000000000 -0400
19894@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
19895 * memory (e.g. tracked pages)? For now, we need this to avoid
19896 * invoking kmemcheck for PnP BIOS calls.
19897 */
19898- if (regs->flags & X86_VM_MASK)
19899+ if (v8086_mode(regs))
19900 return false;
19901- if (regs->cs != __KERNEL_CS)
19902+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
19903 return false;
19904
19905 pte = kmemcheck_pte_lookup(address);
19906diff -urNp linux-3.0.4/arch/x86/mm/mmap.c linux-3.0.4/arch/x86/mm/mmap.c
19907--- linux-3.0.4/arch/x86/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
19908+++ linux-3.0.4/arch/x86/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
19909@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
19910 * Leave an at least ~128 MB hole with possible stack randomization.
19911 */
19912 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
19913-#define MAX_GAP (TASK_SIZE/6*5)
19914+#define MAX_GAP (pax_task_size/6*5)
19915
19916 /*
19917 * True on X86_32 or when emulating IA32 on X86_64
19918@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
19919 return rnd << PAGE_SHIFT;
19920 }
19921
19922-static unsigned long mmap_base(void)
19923+static unsigned long mmap_base(struct mm_struct *mm)
19924 {
19925 unsigned long gap = rlimit(RLIMIT_STACK);
19926+ unsigned long pax_task_size = TASK_SIZE;
19927+
19928+#ifdef CONFIG_PAX_SEGMEXEC
19929+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19930+ pax_task_size = SEGMEXEC_TASK_SIZE;
19931+#endif
19932
19933 if (gap < MIN_GAP)
19934 gap = MIN_GAP;
19935 else if (gap > MAX_GAP)
19936 gap = MAX_GAP;
19937
19938- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
19939+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
19940 }
19941
19942 /*
19943 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
19944 * does, but not when emulating X86_32
19945 */
19946-static unsigned long mmap_legacy_base(void)
19947+static unsigned long mmap_legacy_base(struct mm_struct *mm)
19948 {
19949- if (mmap_is_ia32())
19950+ if (mmap_is_ia32()) {
19951+
19952+#ifdef CONFIG_PAX_SEGMEXEC
19953+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19954+ return SEGMEXEC_TASK_UNMAPPED_BASE;
19955+ else
19956+#endif
19957+
19958 return TASK_UNMAPPED_BASE;
19959- else
19960+ } else
19961 return TASK_UNMAPPED_BASE + mmap_rnd();
19962 }
19963
19964@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
19965 void arch_pick_mmap_layout(struct mm_struct *mm)
19966 {
19967 if (mmap_is_legacy()) {
19968- mm->mmap_base = mmap_legacy_base();
19969+ mm->mmap_base = mmap_legacy_base(mm);
19970+
19971+#ifdef CONFIG_PAX_RANDMMAP
19972+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19973+ mm->mmap_base += mm->delta_mmap;
19974+#endif
19975+
19976 mm->get_unmapped_area = arch_get_unmapped_area;
19977 mm->unmap_area = arch_unmap_area;
19978 } else {
19979- mm->mmap_base = mmap_base();
19980+ mm->mmap_base = mmap_base(mm);
19981+
19982+#ifdef CONFIG_PAX_RANDMMAP
19983+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19984+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
19985+#endif
19986+
19987 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
19988 mm->unmap_area = arch_unmap_area_topdown;
19989 }
19990diff -urNp linux-3.0.4/arch/x86/mm/mmio-mod.c linux-3.0.4/arch/x86/mm/mmio-mod.c
19991--- linux-3.0.4/arch/x86/mm/mmio-mod.c 2011-07-21 22:17:23.000000000 -0400
19992+++ linux-3.0.4/arch/x86/mm/mmio-mod.c 2011-08-23 21:47:55.000000000 -0400
19993@@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, s
19994 break;
19995 default:
19996 {
19997- unsigned char *ip = (unsigned char *)instptr;
19998+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
19999 my_trace->opcode = MMIO_UNKNOWN_OP;
20000 my_trace->width = 0;
20001 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
20002@@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p,
20003 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
20004 void __iomem *addr)
20005 {
20006- static atomic_t next_id;
20007+ static atomic_unchecked_t next_id;
20008 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
20009 /* These are page-unaligned. */
20010 struct mmiotrace_map map = {
20011@@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_
20012 .private = trace
20013 },
20014 .phys = offset,
20015- .id = atomic_inc_return(&next_id)
20016+ .id = atomic_inc_return_unchecked(&next_id)
20017 };
20018 map.map_id = trace->id;
20019
20020diff -urNp linux-3.0.4/arch/x86/mm/pageattr.c linux-3.0.4/arch/x86/mm/pageattr.c
20021--- linux-3.0.4/arch/x86/mm/pageattr.c 2011-07-21 22:17:23.000000000 -0400
20022+++ linux-3.0.4/arch/x86/mm/pageattr.c 2011-08-23 21:47:55.000000000 -0400
20023@@ -261,7 +261,7 @@ static inline pgprot_t static_protection
20024 */
20025 #ifdef CONFIG_PCI_BIOS
20026 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
20027- pgprot_val(forbidden) |= _PAGE_NX;
20028+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20029 #endif
20030
20031 /*
20032@@ -269,9 +269,10 @@ static inline pgprot_t static_protection
20033 * Does not cover __inittext since that is gone later on. On
20034 * 64bit we do not enforce !NX on the low mapping
20035 */
20036- if (within(address, (unsigned long)_text, (unsigned long)_etext))
20037- pgprot_val(forbidden) |= _PAGE_NX;
20038+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
20039+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20040
20041+#ifdef CONFIG_DEBUG_RODATA
20042 /*
20043 * The .rodata section needs to be read-only. Using the pfn
20044 * catches all aliases.
20045@@ -279,6 +280,7 @@ static inline pgprot_t static_protection
20046 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
20047 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
20048 pgprot_val(forbidden) |= _PAGE_RW;
20049+#endif
20050
20051 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
20052 /*
20053@@ -317,6 +319,13 @@ static inline pgprot_t static_protection
20054 }
20055 #endif
20056
20057+#ifdef CONFIG_PAX_KERNEXEC
20058+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
20059+ pgprot_val(forbidden) |= _PAGE_RW;
20060+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20061+ }
20062+#endif
20063+
20064 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
20065
20066 return prot;
20067@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
20068 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
20069 {
20070 /* change init_mm */
20071+ pax_open_kernel();
20072 set_pte_atomic(kpte, pte);
20073+
20074 #ifdef CONFIG_X86_32
20075 if (!SHARED_KERNEL_PMD) {
20076+
20077+#ifdef CONFIG_PAX_PER_CPU_PGD
20078+ unsigned long cpu;
20079+#else
20080 struct page *page;
20081+#endif
20082
20083+#ifdef CONFIG_PAX_PER_CPU_PGD
20084+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20085+ pgd_t *pgd = get_cpu_pgd(cpu);
20086+#else
20087 list_for_each_entry(page, &pgd_list, lru) {
20088- pgd_t *pgd;
20089+ pgd_t *pgd = (pgd_t *)page_address(page);
20090+#endif
20091+
20092 pud_t *pud;
20093 pmd_t *pmd;
20094
20095- pgd = (pgd_t *)page_address(page) + pgd_index(address);
20096+ pgd += pgd_index(address);
20097 pud = pud_offset(pgd, address);
20098 pmd = pmd_offset(pud, address);
20099 set_pte_atomic((pte_t *)pmd, pte);
20100 }
20101 }
20102 #endif
20103+ pax_close_kernel();
20104 }
20105
20106 static int
20107diff -urNp linux-3.0.4/arch/x86/mm/pageattr-test.c linux-3.0.4/arch/x86/mm/pageattr-test.c
20108--- linux-3.0.4/arch/x86/mm/pageattr-test.c 2011-07-21 22:17:23.000000000 -0400
20109+++ linux-3.0.4/arch/x86/mm/pageattr-test.c 2011-08-23 21:47:55.000000000 -0400
20110@@ -36,7 +36,7 @@ enum {
20111
20112 static int pte_testbit(pte_t pte)
20113 {
20114- return pte_flags(pte) & _PAGE_UNUSED1;
20115+ return pte_flags(pte) & _PAGE_CPA_TEST;
20116 }
20117
20118 struct split_state {
20119diff -urNp linux-3.0.4/arch/x86/mm/pat.c linux-3.0.4/arch/x86/mm/pat.c
20120--- linux-3.0.4/arch/x86/mm/pat.c 2011-07-21 22:17:23.000000000 -0400
20121+++ linux-3.0.4/arch/x86/mm/pat.c 2011-08-23 21:47:55.000000000 -0400
20122@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
20123
20124 if (!entry) {
20125 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
20126- current->comm, current->pid, start, end);
20127+ current->comm, task_pid_nr(current), start, end);
20128 return -EINVAL;
20129 }
20130
20131@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
20132 while (cursor < to) {
20133 if (!devmem_is_allowed(pfn)) {
20134 printk(KERN_INFO
20135- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
20136- current->comm, from, to);
20137+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
20138+ current->comm, from, to, cursor);
20139 return 0;
20140 }
20141 cursor += PAGE_SIZE;
20142@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
20143 printk(KERN_INFO
20144 "%s:%d ioremap_change_attr failed %s "
20145 "for %Lx-%Lx\n",
20146- current->comm, current->pid,
20147+ current->comm, task_pid_nr(current),
20148 cattr_name(flags),
20149 base, (unsigned long long)(base + size));
20150 return -EINVAL;
20151@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr,
20152 if (want_flags != flags) {
20153 printk(KERN_WARNING
20154 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
20155- current->comm, current->pid,
20156+ current->comm, task_pid_nr(current),
20157 cattr_name(want_flags),
20158 (unsigned long long)paddr,
20159 (unsigned long long)(paddr + size),
20160@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr,
20161 free_memtype(paddr, paddr + size);
20162 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
20163 " for %Lx-%Lx, got %s\n",
20164- current->comm, current->pid,
20165+ current->comm, task_pid_nr(current),
20166 cattr_name(want_flags),
20167 (unsigned long long)paddr,
20168 (unsigned long long)(paddr + size),
20169diff -urNp linux-3.0.4/arch/x86/mm/pf_in.c linux-3.0.4/arch/x86/mm/pf_in.c
20170--- linux-3.0.4/arch/x86/mm/pf_in.c 2011-07-21 22:17:23.000000000 -0400
20171+++ linux-3.0.4/arch/x86/mm/pf_in.c 2011-08-23 21:47:55.000000000 -0400
20172@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
20173 int i;
20174 enum reason_type rv = OTHERS;
20175
20176- p = (unsigned char *)ins_addr;
20177+ p = (unsigned char *)ktla_ktva(ins_addr);
20178 p += skip_prefix(p, &prf);
20179 p += get_opcode(p, &opcode);
20180
20181@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
20182 struct prefix_bits prf;
20183 int i;
20184
20185- p = (unsigned char *)ins_addr;
20186+ p = (unsigned char *)ktla_ktva(ins_addr);
20187 p += skip_prefix(p, &prf);
20188 p += get_opcode(p, &opcode);
20189
20190@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
20191 struct prefix_bits prf;
20192 int i;
20193
20194- p = (unsigned char *)ins_addr;
20195+ p = (unsigned char *)ktla_ktva(ins_addr);
20196 p += skip_prefix(p, &prf);
20197 p += get_opcode(p, &opcode);
20198
20199@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned l
20200 struct prefix_bits prf;
20201 int i;
20202
20203- p = (unsigned char *)ins_addr;
20204+ p = (unsigned char *)ktla_ktva(ins_addr);
20205 p += skip_prefix(p, &prf);
20206 p += get_opcode(p, &opcode);
20207 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
20208@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned l
20209 struct prefix_bits prf;
20210 int i;
20211
20212- p = (unsigned char *)ins_addr;
20213+ p = (unsigned char *)ktla_ktva(ins_addr);
20214 p += skip_prefix(p, &prf);
20215 p += get_opcode(p, &opcode);
20216 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
20217diff -urNp linux-3.0.4/arch/x86/mm/pgtable_32.c linux-3.0.4/arch/x86/mm/pgtable_32.c
20218--- linux-3.0.4/arch/x86/mm/pgtable_32.c 2011-07-21 22:17:23.000000000 -0400
20219+++ linux-3.0.4/arch/x86/mm/pgtable_32.c 2011-08-23 21:47:55.000000000 -0400
20220@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr,
20221 return;
20222 }
20223 pte = pte_offset_kernel(pmd, vaddr);
20224+
20225+ pax_open_kernel();
20226 if (pte_val(pteval))
20227 set_pte_at(&init_mm, vaddr, pte, pteval);
20228 else
20229 pte_clear(&init_mm, vaddr, pte);
20230+ pax_close_kernel();
20231
20232 /*
20233 * It's enough to flush this one mapping.
20234diff -urNp linux-3.0.4/arch/x86/mm/pgtable.c linux-3.0.4/arch/x86/mm/pgtable.c
20235--- linux-3.0.4/arch/x86/mm/pgtable.c 2011-07-21 22:17:23.000000000 -0400
20236+++ linux-3.0.4/arch/x86/mm/pgtable.c 2011-08-23 21:47:55.000000000 -0400
20237@@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *p
20238 list_del(&page->lru);
20239 }
20240
20241-#define UNSHARED_PTRS_PER_PGD \
20242- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20243+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20244+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
20245
20246+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20247+{
20248+ while (count--)
20249+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
20250+}
20251+#endif
20252+
20253+#ifdef CONFIG_PAX_PER_CPU_PGD
20254+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20255+{
20256+ while (count--)
20257+
20258+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20259+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
20260+#else
20261+ *dst++ = *src++;
20262+#endif
20263
20264+}
20265+#endif
20266+
20267+#ifdef CONFIG_X86_64
20268+#define pxd_t pud_t
20269+#define pyd_t pgd_t
20270+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
20271+#define pxd_free(mm, pud) pud_free((mm), (pud))
20272+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
20273+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
20274+#define PYD_SIZE PGDIR_SIZE
20275+#else
20276+#define pxd_t pmd_t
20277+#define pyd_t pud_t
20278+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
20279+#define pxd_free(mm, pud) pmd_free((mm), (pud))
20280+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
20281+#define pyd_offset(mm ,address) pud_offset((mm), (address))
20282+#define PYD_SIZE PUD_SIZE
20283+#endif
20284+
20285+#ifdef CONFIG_PAX_PER_CPU_PGD
20286+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
20287+static inline void pgd_dtor(pgd_t *pgd) {}
20288+#else
20289 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
20290 {
20291 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
20292@@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
20293 pgd_list_del(pgd);
20294 spin_unlock(&pgd_lock);
20295 }
20296+#endif
20297
20298 /*
20299 * List of all pgd's needed for non-PAE so it can invalidate entries
20300@@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
20301 * -- wli
20302 */
20303
20304-#ifdef CONFIG_X86_PAE
20305+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
20306 /*
20307 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
20308 * updating the top-level pagetable entries to guarantee the
20309@@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
20310 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
20311 * and initialize the kernel pmds here.
20312 */
20313-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
20314+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20315
20316 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
20317 {
20318@@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm,
20319 */
20320 flush_tlb_mm(mm);
20321 }
20322+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
20323+#define PREALLOCATED_PXDS USER_PGD_PTRS
20324 #else /* !CONFIG_X86_PAE */
20325
20326 /* No need to prepopulate any pagetable entries in non-PAE modes. */
20327-#define PREALLOCATED_PMDS 0
20328+#define PREALLOCATED_PXDS 0
20329
20330 #endif /* CONFIG_X86_PAE */
20331
20332-static void free_pmds(pmd_t *pmds[])
20333+static void free_pxds(pxd_t *pxds[])
20334 {
20335 int i;
20336
20337- for(i = 0; i < PREALLOCATED_PMDS; i++)
20338- if (pmds[i])
20339- free_page((unsigned long)pmds[i]);
20340+ for(i = 0; i < PREALLOCATED_PXDS; i++)
20341+ if (pxds[i])
20342+ free_page((unsigned long)pxds[i]);
20343 }
20344
20345-static int preallocate_pmds(pmd_t *pmds[])
20346+static int preallocate_pxds(pxd_t *pxds[])
20347 {
20348 int i;
20349 bool failed = false;
20350
20351- for(i = 0; i < PREALLOCATED_PMDS; i++) {
20352- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
20353- if (pmd == NULL)
20354+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
20355+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
20356+ if (pxd == NULL)
20357 failed = true;
20358- pmds[i] = pmd;
20359+ pxds[i] = pxd;
20360 }
20361
20362 if (failed) {
20363- free_pmds(pmds);
20364+ free_pxds(pxds);
20365 return -ENOMEM;
20366 }
20367
20368@@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[
20369 * preallocate which never got a corresponding vma will need to be
20370 * freed manually.
20371 */
20372-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
20373+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
20374 {
20375 int i;
20376
20377- for(i = 0; i < PREALLOCATED_PMDS; i++) {
20378+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
20379 pgd_t pgd = pgdp[i];
20380
20381 if (pgd_val(pgd) != 0) {
20382- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
20383+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
20384
20385- pgdp[i] = native_make_pgd(0);
20386+ set_pgd(pgdp + i, native_make_pgd(0));
20387
20388- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
20389- pmd_free(mm, pmd);
20390+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
20391+ pxd_free(mm, pxd);
20392 }
20393 }
20394 }
20395
20396-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
20397+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
20398 {
20399- pud_t *pud;
20400+ pyd_t *pyd;
20401 unsigned long addr;
20402 int i;
20403
20404- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
20405+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
20406 return;
20407
20408- pud = pud_offset(pgd, 0);
20409+#ifdef CONFIG_X86_64
20410+ pyd = pyd_offset(mm, 0L);
20411+#else
20412+ pyd = pyd_offset(pgd, 0L);
20413+#endif
20414
20415- for (addr = i = 0; i < PREALLOCATED_PMDS;
20416- i++, pud++, addr += PUD_SIZE) {
20417- pmd_t *pmd = pmds[i];
20418+ for (addr = i = 0; i < PREALLOCATED_PXDS;
20419+ i++, pyd++, addr += PYD_SIZE) {
20420+ pxd_t *pxd = pxds[i];
20421
20422 if (i >= KERNEL_PGD_BOUNDARY)
20423- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20424- sizeof(pmd_t) * PTRS_PER_PMD);
20425+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20426+ sizeof(pxd_t) * PTRS_PER_PMD);
20427
20428- pud_populate(mm, pud, pmd);
20429+ pyd_populate(mm, pyd, pxd);
20430 }
20431 }
20432
20433 pgd_t *pgd_alloc(struct mm_struct *mm)
20434 {
20435 pgd_t *pgd;
20436- pmd_t *pmds[PREALLOCATED_PMDS];
20437+ pxd_t *pxds[PREALLOCATED_PXDS];
20438
20439 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
20440
20441@@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20442
20443 mm->pgd = pgd;
20444
20445- if (preallocate_pmds(pmds) != 0)
20446+ if (preallocate_pxds(pxds) != 0)
20447 goto out_free_pgd;
20448
20449 if (paravirt_pgd_alloc(mm) != 0)
20450- goto out_free_pmds;
20451+ goto out_free_pxds;
20452
20453 /*
20454 * Make sure that pre-populating the pmds is atomic with
20455@@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20456 spin_lock(&pgd_lock);
20457
20458 pgd_ctor(mm, pgd);
20459- pgd_prepopulate_pmd(mm, pgd, pmds);
20460+ pgd_prepopulate_pxd(mm, pgd, pxds);
20461
20462 spin_unlock(&pgd_lock);
20463
20464 return pgd;
20465
20466-out_free_pmds:
20467- free_pmds(pmds);
20468+out_free_pxds:
20469+ free_pxds(pxds);
20470 out_free_pgd:
20471 free_page((unsigned long)pgd);
20472 out:
20473@@ -295,7 +344,7 @@ out:
20474
20475 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
20476 {
20477- pgd_mop_up_pmds(mm, pgd);
20478+ pgd_mop_up_pxds(mm, pgd);
20479 pgd_dtor(pgd);
20480 paravirt_pgd_free(mm, pgd);
20481 free_page((unsigned long)pgd);
20482diff -urNp linux-3.0.4/arch/x86/mm/setup_nx.c linux-3.0.4/arch/x86/mm/setup_nx.c
20483--- linux-3.0.4/arch/x86/mm/setup_nx.c 2011-07-21 22:17:23.000000000 -0400
20484+++ linux-3.0.4/arch/x86/mm/setup_nx.c 2011-08-23 21:47:55.000000000 -0400
20485@@ -5,8 +5,10 @@
20486 #include <asm/pgtable.h>
20487 #include <asm/proto.h>
20488
20489+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20490 static int disable_nx __cpuinitdata;
20491
20492+#ifndef CONFIG_PAX_PAGEEXEC
20493 /*
20494 * noexec = on|off
20495 *
20496@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
20497 return 0;
20498 }
20499 early_param("noexec", noexec_setup);
20500+#endif
20501+
20502+#endif
20503
20504 void __cpuinit x86_configure_nx(void)
20505 {
20506+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20507 if (cpu_has_nx && !disable_nx)
20508 __supported_pte_mask |= _PAGE_NX;
20509 else
20510+#endif
20511 __supported_pte_mask &= ~_PAGE_NX;
20512 }
20513
20514diff -urNp linux-3.0.4/arch/x86/mm/tlb.c linux-3.0.4/arch/x86/mm/tlb.c
20515--- linux-3.0.4/arch/x86/mm/tlb.c 2011-07-21 22:17:23.000000000 -0400
20516+++ linux-3.0.4/arch/x86/mm/tlb.c 2011-08-23 21:47:55.000000000 -0400
20517@@ -65,7 +65,11 @@ void leave_mm(int cpu)
20518 BUG();
20519 cpumask_clear_cpu(cpu,
20520 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
20521+
20522+#ifndef CONFIG_PAX_PER_CPU_PGD
20523 load_cr3(swapper_pg_dir);
20524+#endif
20525+
20526 }
20527 EXPORT_SYMBOL_GPL(leave_mm);
20528
20529diff -urNp linux-3.0.4/arch/x86/net/bpf_jit_comp.c linux-3.0.4/arch/x86/net/bpf_jit_comp.c
20530--- linux-3.0.4/arch/x86/net/bpf_jit_comp.c 2011-07-21 22:17:23.000000000 -0400
20531+++ linux-3.0.4/arch/x86/net/bpf_jit_comp.c 2011-08-23 21:47:55.000000000 -0400
20532@@ -589,7 +589,9 @@ cond_branch: f_offset = addrs[i + filt
20533 module_free(NULL, image);
20534 return;
20535 }
20536+ pax_open_kernel();
20537 memcpy(image + proglen, temp, ilen);
20538+ pax_close_kernel();
20539 }
20540 proglen += ilen;
20541 addrs[i] = proglen;
20542@@ -609,7 +611,7 @@ cond_branch: f_offset = addrs[i + filt
20543 break;
20544 }
20545 if (proglen == oldproglen) {
20546- image = module_alloc(max_t(unsigned int,
20547+ image = module_alloc_exec(max_t(unsigned int,
20548 proglen,
20549 sizeof(struct work_struct)));
20550 if (!image)
20551diff -urNp linux-3.0.4/arch/x86/oprofile/backtrace.c linux-3.0.4/arch/x86/oprofile/backtrace.c
20552--- linux-3.0.4/arch/x86/oprofile/backtrace.c 2011-08-23 21:44:40.000000000 -0400
20553+++ linux-3.0.4/arch/x86/oprofile/backtrace.c 2011-08-23 21:47:55.000000000 -0400
20554@@ -148,7 +148,7 @@ x86_backtrace(struct pt_regs * const reg
20555 {
20556 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
20557
20558- if (!user_mode_vm(regs)) {
20559+ if (!user_mode(regs)) {
20560 unsigned long stack = kernel_stack_pointer(regs);
20561 if (depth)
20562 dump_trace(NULL, regs, (unsigned long *)stack, 0,
20563diff -urNp linux-3.0.4/arch/x86/pci/mrst.c linux-3.0.4/arch/x86/pci/mrst.c
20564--- linux-3.0.4/arch/x86/pci/mrst.c 2011-07-21 22:17:23.000000000 -0400
20565+++ linux-3.0.4/arch/x86/pci/mrst.c 2011-08-23 21:47:55.000000000 -0400
20566@@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
20567 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
20568 pci_mmcfg_late_init();
20569 pcibios_enable_irq = mrst_pci_irq_enable;
20570- pci_root_ops = pci_mrst_ops;
20571+ pax_open_kernel();
20572+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
20573+ pax_close_kernel();
20574 /* Continue with standard init */
20575 return 1;
20576 }
20577diff -urNp linux-3.0.4/arch/x86/pci/pcbios.c linux-3.0.4/arch/x86/pci/pcbios.c
20578--- linux-3.0.4/arch/x86/pci/pcbios.c 2011-07-21 22:17:23.000000000 -0400
20579+++ linux-3.0.4/arch/x86/pci/pcbios.c 2011-08-23 21:47:55.000000000 -0400
20580@@ -79,50 +79,93 @@ union bios32 {
20581 static struct {
20582 unsigned long address;
20583 unsigned short segment;
20584-} bios32_indirect = { 0, __KERNEL_CS };
20585+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
20586
20587 /*
20588 * Returns the entry point for the given service, NULL on error
20589 */
20590
20591-static unsigned long bios32_service(unsigned long service)
20592+static unsigned long __devinit bios32_service(unsigned long service)
20593 {
20594 unsigned char return_code; /* %al */
20595 unsigned long address; /* %ebx */
20596 unsigned long length; /* %ecx */
20597 unsigned long entry; /* %edx */
20598 unsigned long flags;
20599+ struct desc_struct d, *gdt;
20600
20601 local_irq_save(flags);
20602- __asm__("lcall *(%%edi); cld"
20603+
20604+ gdt = get_cpu_gdt_table(smp_processor_id());
20605+
20606+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
20607+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
20608+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
20609+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
20610+
20611+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
20612 : "=a" (return_code),
20613 "=b" (address),
20614 "=c" (length),
20615 "=d" (entry)
20616 : "0" (service),
20617 "1" (0),
20618- "D" (&bios32_indirect));
20619+ "D" (&bios32_indirect),
20620+ "r"(__PCIBIOS_DS)
20621+ : "memory");
20622+
20623+ pax_open_kernel();
20624+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
20625+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
20626+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
20627+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
20628+ pax_close_kernel();
20629+
20630 local_irq_restore(flags);
20631
20632 switch (return_code) {
20633- case 0:
20634- return address + entry;
20635- case 0x80: /* Not present */
20636- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
20637- return 0;
20638- default: /* Shouldn't happen */
20639- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
20640- service, return_code);
20641+ case 0: {
20642+ int cpu;
20643+ unsigned char flags;
20644+
20645+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
20646+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
20647+ printk(KERN_WARNING "bios32_service: not valid\n");
20648 return 0;
20649+ }
20650+ address = address + PAGE_OFFSET;
20651+ length += 16UL; /* some BIOSs underreport this... */
20652+ flags = 4;
20653+ if (length >= 64*1024*1024) {
20654+ length >>= PAGE_SHIFT;
20655+ flags |= 8;
20656+ }
20657+
20658+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
20659+ gdt = get_cpu_gdt_table(cpu);
20660+ pack_descriptor(&d, address, length, 0x9b, flags);
20661+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
20662+ pack_descriptor(&d, address, length, 0x93, flags);
20663+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
20664+ }
20665+ return entry;
20666+ }
20667+ case 0x80: /* Not present */
20668+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
20669+ return 0;
20670+ default: /* Shouldn't happen */
20671+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
20672+ service, return_code);
20673+ return 0;
20674 }
20675 }
20676
20677 static struct {
20678 unsigned long address;
20679 unsigned short segment;
20680-} pci_indirect = { 0, __KERNEL_CS };
20681+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
20682
20683-static int pci_bios_present;
20684+static int pci_bios_present __read_only;
20685
20686 static int __devinit check_pcibios(void)
20687 {
20688@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
20689 unsigned long flags, pcibios_entry;
20690
20691 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
20692- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
20693+ pci_indirect.address = pcibios_entry;
20694
20695 local_irq_save(flags);
20696- __asm__(
20697- "lcall *(%%edi); cld\n\t"
20698+ __asm__("movw %w6, %%ds\n\t"
20699+ "lcall *%%ss:(%%edi); cld\n\t"
20700+ "push %%ss\n\t"
20701+ "pop %%ds\n\t"
20702 "jc 1f\n\t"
20703 "xor %%ah, %%ah\n"
20704 "1:"
20705@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
20706 "=b" (ebx),
20707 "=c" (ecx)
20708 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
20709- "D" (&pci_indirect)
20710+ "D" (&pci_indirect),
20711+ "r" (__PCIBIOS_DS)
20712 : "memory");
20713 local_irq_restore(flags);
20714
20715@@ -188,7 +234,10 @@ static int pci_bios_read(unsigned int se
20716
20717 switch (len) {
20718 case 1:
20719- __asm__("lcall *(%%esi); cld\n\t"
20720+ __asm__("movw %w6, %%ds\n\t"
20721+ "lcall *%%ss:(%%esi); cld\n\t"
20722+ "push %%ss\n\t"
20723+ "pop %%ds\n\t"
20724 "jc 1f\n\t"
20725 "xor %%ah, %%ah\n"
20726 "1:"
20727@@ -197,7 +246,8 @@ static int pci_bios_read(unsigned int se
20728 : "1" (PCIBIOS_READ_CONFIG_BYTE),
20729 "b" (bx),
20730 "D" ((long)reg),
20731- "S" (&pci_indirect));
20732+ "S" (&pci_indirect),
20733+ "r" (__PCIBIOS_DS));
20734 /*
20735 * Zero-extend the result beyond 8 bits, do not trust the
20736 * BIOS having done it:
20737@@ -205,7 +255,10 @@ static int pci_bios_read(unsigned int se
20738 *value &= 0xff;
20739 break;
20740 case 2:
20741- __asm__("lcall *(%%esi); cld\n\t"
20742+ __asm__("movw %w6, %%ds\n\t"
20743+ "lcall *%%ss:(%%esi); cld\n\t"
20744+ "push %%ss\n\t"
20745+ "pop %%ds\n\t"
20746 "jc 1f\n\t"
20747 "xor %%ah, %%ah\n"
20748 "1:"
20749@@ -214,7 +267,8 @@ static int pci_bios_read(unsigned int se
20750 : "1" (PCIBIOS_READ_CONFIG_WORD),
20751 "b" (bx),
20752 "D" ((long)reg),
20753- "S" (&pci_indirect));
20754+ "S" (&pci_indirect),
20755+ "r" (__PCIBIOS_DS));
20756 /*
20757 * Zero-extend the result beyond 16 bits, do not trust the
20758 * BIOS having done it:
20759@@ -222,7 +276,10 @@ static int pci_bios_read(unsigned int se
20760 *value &= 0xffff;
20761 break;
20762 case 4:
20763- __asm__("lcall *(%%esi); cld\n\t"
20764+ __asm__("movw %w6, %%ds\n\t"
20765+ "lcall *%%ss:(%%esi); cld\n\t"
20766+ "push %%ss\n\t"
20767+ "pop %%ds\n\t"
20768 "jc 1f\n\t"
20769 "xor %%ah, %%ah\n"
20770 "1:"
20771@@ -231,7 +288,8 @@ static int pci_bios_read(unsigned int se
20772 : "1" (PCIBIOS_READ_CONFIG_DWORD),
20773 "b" (bx),
20774 "D" ((long)reg),
20775- "S" (&pci_indirect));
20776+ "S" (&pci_indirect),
20777+ "r" (__PCIBIOS_DS));
20778 break;
20779 }
20780
20781@@ -254,7 +312,10 @@ static int pci_bios_write(unsigned int s
20782
20783 switch (len) {
20784 case 1:
20785- __asm__("lcall *(%%esi); cld\n\t"
20786+ __asm__("movw %w6, %%ds\n\t"
20787+ "lcall *%%ss:(%%esi); cld\n\t"
20788+ "push %%ss\n\t"
20789+ "pop %%ds\n\t"
20790 "jc 1f\n\t"
20791 "xor %%ah, %%ah\n"
20792 "1:"
20793@@ -263,10 +324,14 @@ static int pci_bios_write(unsigned int s
20794 "c" (value),
20795 "b" (bx),
20796 "D" ((long)reg),
20797- "S" (&pci_indirect));
20798+ "S" (&pci_indirect),
20799+ "r" (__PCIBIOS_DS));
20800 break;
20801 case 2:
20802- __asm__("lcall *(%%esi); cld\n\t"
20803+ __asm__("movw %w6, %%ds\n\t"
20804+ "lcall *%%ss:(%%esi); cld\n\t"
20805+ "push %%ss\n\t"
20806+ "pop %%ds\n\t"
20807 "jc 1f\n\t"
20808 "xor %%ah, %%ah\n"
20809 "1:"
20810@@ -275,10 +340,14 @@ static int pci_bios_write(unsigned int s
20811 "c" (value),
20812 "b" (bx),
20813 "D" ((long)reg),
20814- "S" (&pci_indirect));
20815+ "S" (&pci_indirect),
20816+ "r" (__PCIBIOS_DS));
20817 break;
20818 case 4:
20819- __asm__("lcall *(%%esi); cld\n\t"
20820+ __asm__("movw %w6, %%ds\n\t"
20821+ "lcall *%%ss:(%%esi); cld\n\t"
20822+ "push %%ss\n\t"
20823+ "pop %%ds\n\t"
20824 "jc 1f\n\t"
20825 "xor %%ah, %%ah\n"
20826 "1:"
20827@@ -287,7 +356,8 @@ static int pci_bios_write(unsigned int s
20828 "c" (value),
20829 "b" (bx),
20830 "D" ((long)reg),
20831- "S" (&pci_indirect));
20832+ "S" (&pci_indirect),
20833+ "r" (__PCIBIOS_DS));
20834 break;
20835 }
20836
20837@@ -392,10 +462,13 @@ struct irq_routing_table * pcibios_get_i
20838
20839 DBG("PCI: Fetching IRQ routing table... ");
20840 __asm__("push %%es\n\t"
20841+ "movw %w8, %%ds\n\t"
20842 "push %%ds\n\t"
20843 "pop %%es\n\t"
20844- "lcall *(%%esi); cld\n\t"
20845+ "lcall *%%ss:(%%esi); cld\n\t"
20846 "pop %%es\n\t"
20847+ "push %%ss\n\t"
20848+ "pop %%ds\n"
20849 "jc 1f\n\t"
20850 "xor %%ah, %%ah\n"
20851 "1:"
20852@@ -406,7 +479,8 @@ struct irq_routing_table * pcibios_get_i
20853 "1" (0),
20854 "D" ((long) &opt),
20855 "S" (&pci_indirect),
20856- "m" (opt)
20857+ "m" (opt),
20858+ "r" (__PCIBIOS_DS)
20859 : "memory");
20860 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
20861 if (ret & 0xff00)
20862@@ -430,7 +504,10 @@ int pcibios_set_irq_routing(struct pci_d
20863 {
20864 int ret;
20865
20866- __asm__("lcall *(%%esi); cld\n\t"
20867+ __asm__("movw %w5, %%ds\n\t"
20868+ "lcall *%%ss:(%%esi); cld\n\t"
20869+ "push %%ss\n\t"
20870+ "pop %%ds\n"
20871 "jc 1f\n\t"
20872 "xor %%ah, %%ah\n"
20873 "1:"
20874@@ -438,7 +515,8 @@ int pcibios_set_irq_routing(struct pci_d
20875 : "0" (PCIBIOS_SET_PCI_HW_INT),
20876 "b" ((dev->bus->number << 8) | dev->devfn),
20877 "c" ((irq << 8) | (pin + 10)),
20878- "S" (&pci_indirect));
20879+ "S" (&pci_indirect),
20880+ "r" (__PCIBIOS_DS));
20881 return !(ret & 0xff00);
20882 }
20883 EXPORT_SYMBOL(pcibios_set_irq_routing);
20884diff -urNp linux-3.0.4/arch/x86/platform/efi/efi_32.c linux-3.0.4/arch/x86/platform/efi/efi_32.c
20885--- linux-3.0.4/arch/x86/platform/efi/efi_32.c 2011-07-21 22:17:23.000000000 -0400
20886+++ linux-3.0.4/arch/x86/platform/efi/efi_32.c 2011-08-23 21:47:55.000000000 -0400
20887@@ -38,70 +38,37 @@
20888 */
20889
20890 static unsigned long efi_rt_eflags;
20891-static pgd_t efi_bak_pg_dir_pointer[2];
20892+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
20893
20894-void efi_call_phys_prelog(void)
20895+void __init efi_call_phys_prelog(void)
20896 {
20897- unsigned long cr4;
20898- unsigned long temp;
20899 struct desc_ptr gdt_descr;
20900
20901 local_irq_save(efi_rt_eflags);
20902
20903- /*
20904- * If I don't have PAE, I should just duplicate two entries in page
20905- * directory. If I have PAE, I just need to duplicate one entry in
20906- * page directory.
20907- */
20908- cr4 = read_cr4_safe();
20909-
20910- if (cr4 & X86_CR4_PAE) {
20911- efi_bak_pg_dir_pointer[0].pgd =
20912- swapper_pg_dir[pgd_index(0)].pgd;
20913- swapper_pg_dir[0].pgd =
20914- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
20915- } else {
20916- efi_bak_pg_dir_pointer[0].pgd =
20917- swapper_pg_dir[pgd_index(0)].pgd;
20918- efi_bak_pg_dir_pointer[1].pgd =
20919- swapper_pg_dir[pgd_index(0x400000)].pgd;
20920- swapper_pg_dir[pgd_index(0)].pgd =
20921- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
20922- temp = PAGE_OFFSET + 0x400000;
20923- swapper_pg_dir[pgd_index(0x400000)].pgd =
20924- swapper_pg_dir[pgd_index(temp)].pgd;
20925- }
20926+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
20927+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20928+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
20929
20930 /*
20931 * After the lock is released, the original page table is restored.
20932 */
20933 __flush_tlb_all();
20934
20935- gdt_descr.address = __pa(get_cpu_gdt_table(0));
20936+ gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
20937 gdt_descr.size = GDT_SIZE - 1;
20938 load_gdt(&gdt_descr);
20939 }
20940
20941-void efi_call_phys_epilog(void)
20942+void __init efi_call_phys_epilog(void)
20943 {
20944- unsigned long cr4;
20945 struct desc_ptr gdt_descr;
20946
20947- gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
20948+ gdt_descr.address = get_cpu_gdt_table(0);
20949 gdt_descr.size = GDT_SIZE - 1;
20950 load_gdt(&gdt_descr);
20951
20952- cr4 = read_cr4_safe();
20953-
20954- if (cr4 & X86_CR4_PAE) {
20955- swapper_pg_dir[pgd_index(0)].pgd =
20956- efi_bak_pg_dir_pointer[0].pgd;
20957- } else {
20958- swapper_pg_dir[pgd_index(0)].pgd =
20959- efi_bak_pg_dir_pointer[0].pgd;
20960- swapper_pg_dir[pgd_index(0x400000)].pgd =
20961- efi_bak_pg_dir_pointer[1].pgd;
20962- }
20963+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
20964
20965 /*
20966 * After the lock is released, the original page table is restored.
20967diff -urNp linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S
20968--- linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S 2011-07-21 22:17:23.000000000 -0400
20969+++ linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S 2011-08-23 21:47:55.000000000 -0400
20970@@ -6,6 +6,7 @@
20971 */
20972
20973 #include <linux/linkage.h>
20974+#include <linux/init.h>
20975 #include <asm/page_types.h>
20976
20977 /*
20978@@ -20,7 +21,7 @@
20979 * service functions will comply with gcc calling convention, too.
20980 */
20981
20982-.text
20983+__INIT
20984 ENTRY(efi_call_phys)
20985 /*
20986 * 0. The function can only be called in Linux kernel. So CS has been
20987@@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
20988 * The mapping of lower virtual memory has been created in prelog and
20989 * epilog.
20990 */
20991- movl $1f, %edx
20992- subl $__PAGE_OFFSET, %edx
20993- jmp *%edx
20994+ jmp 1f-__PAGE_OFFSET
20995 1:
20996
20997 /*
20998@@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
20999 * parameter 2, ..., param n. To make things easy, we save the return
21000 * address of efi_call_phys in a global variable.
21001 */
21002- popl %edx
21003- movl %edx, saved_return_addr
21004- /* get the function pointer into ECX*/
21005- popl %ecx
21006- movl %ecx, efi_rt_function_ptr
21007- movl $2f, %edx
21008- subl $__PAGE_OFFSET, %edx
21009- pushl %edx
21010+ popl (saved_return_addr)
21011+ popl (efi_rt_function_ptr)
21012
21013 /*
21014 * 3. Clear PG bit in %CR0.
21015@@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
21016 /*
21017 * 5. Call the physical function.
21018 */
21019- jmp *%ecx
21020+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
21021
21022-2:
21023 /*
21024 * 6. After EFI runtime service returns, control will return to
21025 * following instruction. We'd better readjust stack pointer first.
21026@@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
21027 movl %cr0, %edx
21028 orl $0x80000000, %edx
21029 movl %edx, %cr0
21030- jmp 1f
21031-1:
21032+
21033 /*
21034 * 8. Now restore the virtual mode from flat mode by
21035 * adding EIP with PAGE_OFFSET.
21036 */
21037- movl $1f, %edx
21038- jmp *%edx
21039+ jmp 1f+__PAGE_OFFSET
21040 1:
21041
21042 /*
21043 * 9. Balance the stack. And because EAX contain the return value,
21044 * we'd better not clobber it.
21045 */
21046- leal efi_rt_function_ptr, %edx
21047- movl (%edx), %ecx
21048- pushl %ecx
21049+ pushl (efi_rt_function_ptr)
21050
21051 /*
21052- * 10. Push the saved return address onto the stack and return.
21053+ * 10. Return to the saved return address.
21054 */
21055- leal saved_return_addr, %edx
21056- movl (%edx), %ecx
21057- pushl %ecx
21058- ret
21059+ jmpl *(saved_return_addr)
21060 ENDPROC(efi_call_phys)
21061 .previous
21062
21063-.data
21064+__INITDATA
21065 saved_return_addr:
21066 .long 0
21067 efi_rt_function_ptr:
21068diff -urNp linux-3.0.4/arch/x86/platform/mrst/mrst.c linux-3.0.4/arch/x86/platform/mrst/mrst.c
21069--- linux-3.0.4/arch/x86/platform/mrst/mrst.c 2011-07-21 22:17:23.000000000 -0400
21070+++ linux-3.0.4/arch/x86/platform/mrst/mrst.c 2011-08-23 21:47:55.000000000 -0400
21071@@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
21072 }
21073
21074 /* Reboot and power off are handled by the SCU on a MID device */
21075-static void mrst_power_off(void)
21076+static __noreturn void mrst_power_off(void)
21077 {
21078 intel_scu_ipc_simple_command(0xf1, 1);
21079+ BUG();
21080 }
21081
21082-static void mrst_reboot(void)
21083+static __noreturn void mrst_reboot(void)
21084 {
21085 intel_scu_ipc_simple_command(0xf1, 0);
21086+ BUG();
21087 }
21088
21089 /*
21090diff -urNp linux-3.0.4/arch/x86/platform/uv/tlb_uv.c linux-3.0.4/arch/x86/platform/uv/tlb_uv.c
21091--- linux-3.0.4/arch/x86/platform/uv/tlb_uv.c 2011-07-21 22:17:23.000000000 -0400
21092+++ linux-3.0.4/arch/x86/platform/uv/tlb_uv.c 2011-08-23 21:48:14.000000000 -0400
21093@@ -373,6 +373,8 @@ static void reset_with_ipi(struct bau_ta
21094 cpumask_t mask;
21095 struct reset_args reset_args;
21096
21097+ pax_track_stack();
21098+
21099 reset_args.sender = sender;
21100 cpus_clear(mask);
21101 /* find a single cpu for each uvhub in this distribution mask */
21102diff -urNp linux-3.0.4/arch/x86/power/cpu.c linux-3.0.4/arch/x86/power/cpu.c
21103--- linux-3.0.4/arch/x86/power/cpu.c 2011-07-21 22:17:23.000000000 -0400
21104+++ linux-3.0.4/arch/x86/power/cpu.c 2011-08-23 21:47:55.000000000 -0400
21105@@ -130,7 +130,7 @@ static void do_fpu_end(void)
21106 static void fix_processor_context(void)
21107 {
21108 int cpu = smp_processor_id();
21109- struct tss_struct *t = &per_cpu(init_tss, cpu);
21110+ struct tss_struct *t = init_tss + cpu;
21111
21112 set_tss_desc(cpu, t); /*
21113 * This just modifies memory; should not be
21114@@ -140,7 +140,9 @@ static void fix_processor_context(void)
21115 */
21116
21117 #ifdef CONFIG_X86_64
21118+ pax_open_kernel();
21119 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
21120+ pax_close_kernel();
21121
21122 syscall_init(); /* This sets MSR_*STAR and related */
21123 #endif
21124diff -urNp linux-3.0.4/arch/x86/vdso/Makefile linux-3.0.4/arch/x86/vdso/Makefile
21125--- linux-3.0.4/arch/x86/vdso/Makefile 2011-07-21 22:17:23.000000000 -0400
21126+++ linux-3.0.4/arch/x86/vdso/Makefile 2011-08-23 21:47:55.000000000 -0400
21127@@ -136,7 +136,7 @@ quiet_cmd_vdso = VDSO $@
21128 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
21129 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
21130
21131-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21132+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21133 GCOV_PROFILE := n
21134
21135 #
21136diff -urNp linux-3.0.4/arch/x86/vdso/vdso32-setup.c linux-3.0.4/arch/x86/vdso/vdso32-setup.c
21137--- linux-3.0.4/arch/x86/vdso/vdso32-setup.c 2011-07-21 22:17:23.000000000 -0400
21138+++ linux-3.0.4/arch/x86/vdso/vdso32-setup.c 2011-08-23 21:47:55.000000000 -0400
21139@@ -25,6 +25,7 @@
21140 #include <asm/tlbflush.h>
21141 #include <asm/vdso.h>
21142 #include <asm/proto.h>
21143+#include <asm/mman.h>
21144
21145 enum {
21146 VDSO_DISABLED = 0,
21147@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
21148 void enable_sep_cpu(void)
21149 {
21150 int cpu = get_cpu();
21151- struct tss_struct *tss = &per_cpu(init_tss, cpu);
21152+ struct tss_struct *tss = init_tss + cpu;
21153
21154 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21155 put_cpu();
21156@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
21157 gate_vma.vm_start = FIXADDR_USER_START;
21158 gate_vma.vm_end = FIXADDR_USER_END;
21159 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
21160- gate_vma.vm_page_prot = __P101;
21161+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
21162 /*
21163 * Make sure the vDSO gets into every core dump.
21164 * Dumping its contents makes post-mortem fully interpretable later
21165@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
21166 if (compat)
21167 addr = VDSO_HIGH_BASE;
21168 else {
21169- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
21170+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
21171 if (IS_ERR_VALUE(addr)) {
21172 ret = addr;
21173 goto up_fail;
21174 }
21175 }
21176
21177- current->mm->context.vdso = (void *)addr;
21178+ current->mm->context.vdso = addr;
21179
21180 if (compat_uses_vma || !compat) {
21181 /*
21182@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
21183 }
21184
21185 current_thread_info()->sysenter_return =
21186- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21187+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21188
21189 up_fail:
21190 if (ret)
21191- current->mm->context.vdso = NULL;
21192+ current->mm->context.vdso = 0;
21193
21194 up_write(&mm->mmap_sem);
21195
21196@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
21197
21198 const char *arch_vma_name(struct vm_area_struct *vma)
21199 {
21200- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21201+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21202 return "[vdso]";
21203+
21204+#ifdef CONFIG_PAX_SEGMEXEC
21205+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
21206+ return "[vdso]";
21207+#endif
21208+
21209 return NULL;
21210 }
21211
21212@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
21213 * Check to see if the corresponding task was created in compat vdso
21214 * mode.
21215 */
21216- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
21217+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
21218 return &gate_vma;
21219 return NULL;
21220 }
21221diff -urNp linux-3.0.4/arch/x86/vdso/vma.c linux-3.0.4/arch/x86/vdso/vma.c
21222--- linux-3.0.4/arch/x86/vdso/vma.c 2011-07-21 22:17:23.000000000 -0400
21223+++ linux-3.0.4/arch/x86/vdso/vma.c 2011-08-23 21:47:55.000000000 -0400
21224@@ -15,18 +15,19 @@
21225 #include <asm/proto.h>
21226 #include <asm/vdso.h>
21227
21228-unsigned int __read_mostly vdso_enabled = 1;
21229-
21230 extern char vdso_start[], vdso_end[];
21231 extern unsigned short vdso_sync_cpuid;
21232+extern char __vsyscall_0;
21233
21234 static struct page **vdso_pages;
21235+static struct page *vsyscall_page;
21236 static unsigned vdso_size;
21237
21238 static int __init init_vdso_vars(void)
21239 {
21240- int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
21241- int i;
21242+ size_t nbytes = vdso_end - vdso_start;
21243+ size_t npages = (nbytes + PAGE_SIZE - 1) / PAGE_SIZE;
21244+ size_t i;
21245
21246 vdso_size = npages << PAGE_SHIFT;
21247 vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
21248@@ -34,19 +35,19 @@ static int __init init_vdso_vars(void)
21249 goto oom;
21250 for (i = 0; i < npages; i++) {
21251 struct page *p;
21252- p = alloc_page(GFP_KERNEL);
21253+ p = alloc_page(GFP_KERNEL | __GFP_ZERO);
21254 if (!p)
21255 goto oom;
21256 vdso_pages[i] = p;
21257- copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
21258+ memcpy(page_address(p), vdso_start + i*PAGE_SIZE, nbytes > PAGE_SIZE ? PAGE_SIZE : nbytes);
21259+ nbytes -= PAGE_SIZE;
21260 }
21261+ vsyscall_page = pfn_to_page((__pa_symbol(&__vsyscall_0)) >> PAGE_SHIFT);
21262
21263 return 0;
21264
21265 oom:
21266- printk("Cannot allocate vdso\n");
21267- vdso_enabled = 0;
21268- return -ENOMEM;
21269+ panic("Cannot allocate vdso\n");
21270 }
21271 subsys_initcall(init_vdso_vars);
21272
21273@@ -80,37 +81,35 @@ int arch_setup_additional_pages(struct l
21274 unsigned long addr;
21275 int ret;
21276
21277- if (!vdso_enabled)
21278- return 0;
21279-
21280 down_write(&mm->mmap_sem);
21281- addr = vdso_addr(mm->start_stack, vdso_size);
21282- addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
21283+ addr = vdso_addr(mm->start_stack, vdso_size + PAGE_SIZE);
21284+ addr = get_unmapped_area(NULL, addr, vdso_size + PAGE_SIZE, 0, 0);
21285 if (IS_ERR_VALUE(addr)) {
21286 ret = addr;
21287 goto up_fail;
21288 }
21289
21290- current->mm->context.vdso = (void *)addr;
21291+ mm->context.vdso = addr + PAGE_SIZE;
21292
21293- ret = install_special_mapping(mm, addr, vdso_size,
21294+ ret = install_special_mapping(mm, addr, PAGE_SIZE,
21295 VM_READ|VM_EXEC|
21296- VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
21297+ VM_MAYREAD|VM_MAYEXEC|
21298 VM_ALWAYSDUMP,
21299- vdso_pages);
21300+ &vsyscall_page);
21301 if (ret) {
21302- current->mm->context.vdso = NULL;
21303+ mm->context.vdso = 0;
21304 goto up_fail;
21305 }
21306
21307+ ret = install_special_mapping(mm, addr + PAGE_SIZE, vdso_size,
21308+ VM_READ|VM_EXEC|
21309+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
21310+ VM_ALWAYSDUMP,
21311+ vdso_pages);
21312+ if (ret)
21313+ mm->context.vdso = 0;
21314+
21315 up_fail:
21316 up_write(&mm->mmap_sem);
21317 return ret;
21318 }
21319-
21320-static __init int vdso_setup(char *s)
21321-{
21322- vdso_enabled = simple_strtoul(s, NULL, 0);
21323- return 0;
21324-}
21325-__setup("vdso=", vdso_setup);
21326diff -urNp linux-3.0.4/arch/x86/xen/enlighten.c linux-3.0.4/arch/x86/xen/enlighten.c
21327--- linux-3.0.4/arch/x86/xen/enlighten.c 2011-08-29 23:26:13.000000000 -0400
21328+++ linux-3.0.4/arch/x86/xen/enlighten.c 2011-08-29 23:26:21.000000000 -0400
21329@@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
21330
21331 struct shared_info xen_dummy_shared_info;
21332
21333-void *xen_initial_gdt;
21334-
21335 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
21336 __read_mostly int xen_have_vector_callback;
21337 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
21338@@ -1010,7 +1008,7 @@ static const struct pv_apic_ops xen_apic
21339 #endif
21340 };
21341
21342-static void xen_reboot(int reason)
21343+static __noreturn void xen_reboot(int reason)
21344 {
21345 struct sched_shutdown r = { .reason = reason };
21346
21347@@ -1018,17 +1016,17 @@ static void xen_reboot(int reason)
21348 BUG();
21349 }
21350
21351-static void xen_restart(char *msg)
21352+static __noreturn void xen_restart(char *msg)
21353 {
21354 xen_reboot(SHUTDOWN_reboot);
21355 }
21356
21357-static void xen_emergency_restart(void)
21358+static __noreturn void xen_emergency_restart(void)
21359 {
21360 xen_reboot(SHUTDOWN_reboot);
21361 }
21362
21363-static void xen_machine_halt(void)
21364+static __noreturn void xen_machine_halt(void)
21365 {
21366 xen_reboot(SHUTDOWN_poweroff);
21367 }
21368@@ -1134,7 +1132,17 @@ asmlinkage void __init xen_start_kernel(
21369 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
21370
21371 /* Work out if we support NX */
21372- x86_configure_nx();
21373+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21374+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
21375+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
21376+ unsigned l, h;
21377+
21378+ __supported_pte_mask |= _PAGE_NX;
21379+ rdmsr(MSR_EFER, l, h);
21380+ l |= EFER_NX;
21381+ wrmsr(MSR_EFER, l, h);
21382+ }
21383+#endif
21384
21385 xen_setup_features();
21386
21387@@ -1165,13 +1173,6 @@ asmlinkage void __init xen_start_kernel(
21388
21389 machine_ops = xen_machine_ops;
21390
21391- /*
21392- * The only reliable way to retain the initial address of the
21393- * percpu gdt_page is to remember it here, so we can go and
21394- * mark it RW later, when the initial percpu area is freed.
21395- */
21396- xen_initial_gdt = &per_cpu(gdt_page, 0);
21397-
21398 xen_smp_init();
21399
21400 #ifdef CONFIG_ACPI_NUMA
21401diff -urNp linux-3.0.4/arch/x86/xen/mmu.c linux-3.0.4/arch/x86/xen/mmu.c
21402--- linux-3.0.4/arch/x86/xen/mmu.c 2011-08-29 23:26:13.000000000 -0400
21403+++ linux-3.0.4/arch/x86/xen/mmu.c 2011-08-29 23:26:21.000000000 -0400
21404@@ -1683,6 +1683,8 @@ pgd_t * __init xen_setup_kernel_pagetabl
21405 convert_pfn_mfn(init_level4_pgt);
21406 convert_pfn_mfn(level3_ident_pgt);
21407 convert_pfn_mfn(level3_kernel_pgt);
21408+ convert_pfn_mfn(level3_vmalloc_pgt);
21409+ convert_pfn_mfn(level3_vmemmap_pgt);
21410
21411 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
21412 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
21413@@ -1701,7 +1703,10 @@ pgd_t * __init xen_setup_kernel_pagetabl
21414 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
21415 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
21416 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
21417+ set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
21418+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
21419 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
21420+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
21421 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
21422 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
21423
21424@@ -1913,6 +1918,7 @@ static void __init xen_post_allocator_in
21425 pv_mmu_ops.set_pud = xen_set_pud;
21426 #if PAGETABLE_LEVELS == 4
21427 pv_mmu_ops.set_pgd = xen_set_pgd;
21428+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
21429 #endif
21430
21431 /* This will work as long as patching hasn't happened yet
21432@@ -1994,6 +2000,7 @@ static const struct pv_mmu_ops xen_mmu_o
21433 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
21434 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
21435 .set_pgd = xen_set_pgd_hyper,
21436+ .set_pgd_batched = xen_set_pgd_hyper,
21437
21438 .alloc_pud = xen_alloc_pmd_init,
21439 .release_pud = xen_release_pmd_init,
21440diff -urNp linux-3.0.4/arch/x86/xen/smp.c linux-3.0.4/arch/x86/xen/smp.c
21441--- linux-3.0.4/arch/x86/xen/smp.c 2011-08-29 23:26:13.000000000 -0400
21442+++ linux-3.0.4/arch/x86/xen/smp.c 2011-08-29 23:26:21.000000000 -0400
21443@@ -193,11 +193,6 @@ static void __init xen_smp_prepare_boot_
21444 {
21445 BUG_ON(smp_processor_id() != 0);
21446 native_smp_prepare_boot_cpu();
21447-
21448- /* We've switched to the "real" per-cpu gdt, so make sure the
21449- old memory can be recycled */
21450- make_lowmem_page_readwrite(xen_initial_gdt);
21451-
21452 xen_filter_cpu_maps();
21453 xen_setup_vcpu_info_placement();
21454 }
21455@@ -265,12 +260,12 @@ cpu_initialize_context(unsigned int cpu,
21456 gdt = get_cpu_gdt_table(cpu);
21457
21458 ctxt->flags = VGCF_IN_KERNEL;
21459- ctxt->user_regs.ds = __USER_DS;
21460- ctxt->user_regs.es = __USER_DS;
21461+ ctxt->user_regs.ds = __KERNEL_DS;
21462+ ctxt->user_regs.es = __KERNEL_DS;
21463 ctxt->user_regs.ss = __KERNEL_DS;
21464 #ifdef CONFIG_X86_32
21465 ctxt->user_regs.fs = __KERNEL_PERCPU;
21466- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
21467+ savesegment(gs, ctxt->user_regs.gs);
21468 #else
21469 ctxt->gs_base_kernel = per_cpu_offset(cpu);
21470 #endif
21471@@ -321,13 +316,12 @@ static int __cpuinit xen_cpu_up(unsigned
21472 int rc;
21473
21474 per_cpu(current_task, cpu) = idle;
21475+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
21476 #ifdef CONFIG_X86_32
21477 irq_ctx_init(cpu);
21478 #else
21479 clear_tsk_thread_flag(idle, TIF_FORK);
21480- per_cpu(kernel_stack, cpu) =
21481- (unsigned long)task_stack_page(idle) -
21482- KERNEL_STACK_OFFSET + THREAD_SIZE;
21483+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
21484 #endif
21485 xen_setup_runstate_info(cpu);
21486 xen_setup_timer(cpu);
21487diff -urNp linux-3.0.4/arch/x86/xen/xen-asm_32.S linux-3.0.4/arch/x86/xen/xen-asm_32.S
21488--- linux-3.0.4/arch/x86/xen/xen-asm_32.S 2011-07-21 22:17:23.000000000 -0400
21489+++ linux-3.0.4/arch/x86/xen/xen-asm_32.S 2011-08-23 21:47:55.000000000 -0400
21490@@ -83,14 +83,14 @@ ENTRY(xen_iret)
21491 ESP_OFFSET=4 # bytes pushed onto stack
21492
21493 /*
21494- * Store vcpu_info pointer for easy access. Do it this way to
21495- * avoid having to reload %fs
21496+ * Store vcpu_info pointer for easy access.
21497 */
21498 #ifdef CONFIG_SMP
21499- GET_THREAD_INFO(%eax)
21500- movl TI_cpu(%eax), %eax
21501- movl __per_cpu_offset(,%eax,4), %eax
21502- mov xen_vcpu(%eax), %eax
21503+ push %fs
21504+ mov $(__KERNEL_PERCPU), %eax
21505+ mov %eax, %fs
21506+ mov PER_CPU_VAR(xen_vcpu), %eax
21507+ pop %fs
21508 #else
21509 movl xen_vcpu, %eax
21510 #endif
21511diff -urNp linux-3.0.4/arch/x86/xen/xen-head.S linux-3.0.4/arch/x86/xen/xen-head.S
21512--- linux-3.0.4/arch/x86/xen/xen-head.S 2011-07-21 22:17:23.000000000 -0400
21513+++ linux-3.0.4/arch/x86/xen/xen-head.S 2011-08-23 21:47:55.000000000 -0400
21514@@ -19,6 +19,17 @@ ENTRY(startup_xen)
21515 #ifdef CONFIG_X86_32
21516 mov %esi,xen_start_info
21517 mov $init_thread_union+THREAD_SIZE,%esp
21518+#ifdef CONFIG_SMP
21519+ movl $cpu_gdt_table,%edi
21520+ movl $__per_cpu_load,%eax
21521+ movw %ax,__KERNEL_PERCPU + 2(%edi)
21522+ rorl $16,%eax
21523+ movb %al,__KERNEL_PERCPU + 4(%edi)
21524+ movb %ah,__KERNEL_PERCPU + 7(%edi)
21525+ movl $__per_cpu_end - 1,%eax
21526+ subl $__per_cpu_start,%eax
21527+ movw %ax,__KERNEL_PERCPU + 0(%edi)
21528+#endif
21529 #else
21530 mov %rsi,xen_start_info
21531 mov $init_thread_union+THREAD_SIZE,%rsp
21532diff -urNp linux-3.0.4/arch/x86/xen/xen-ops.h linux-3.0.4/arch/x86/xen/xen-ops.h
21533--- linux-3.0.4/arch/x86/xen/xen-ops.h 2011-08-23 21:44:40.000000000 -0400
21534+++ linux-3.0.4/arch/x86/xen/xen-ops.h 2011-08-23 21:47:55.000000000 -0400
21535@@ -10,8 +10,6 @@
21536 extern const char xen_hypervisor_callback[];
21537 extern const char xen_failsafe_callback[];
21538
21539-extern void *xen_initial_gdt;
21540-
21541 struct trap_info;
21542 void xen_copy_trap_info(struct trap_info *traps);
21543
21544diff -urNp linux-3.0.4/block/blk-iopoll.c linux-3.0.4/block/blk-iopoll.c
21545--- linux-3.0.4/block/blk-iopoll.c 2011-07-21 22:17:23.000000000 -0400
21546+++ linux-3.0.4/block/blk-iopoll.c 2011-08-23 21:47:55.000000000 -0400
21547@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
21548 }
21549 EXPORT_SYMBOL(blk_iopoll_complete);
21550
21551-static void blk_iopoll_softirq(struct softirq_action *h)
21552+static void blk_iopoll_softirq(void)
21553 {
21554 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
21555 int rearm = 0, budget = blk_iopoll_budget;
21556diff -urNp linux-3.0.4/block/blk-map.c linux-3.0.4/block/blk-map.c
21557--- linux-3.0.4/block/blk-map.c 2011-07-21 22:17:23.000000000 -0400
21558+++ linux-3.0.4/block/blk-map.c 2011-08-23 21:47:55.000000000 -0400
21559@@ -301,7 +301,7 @@ int blk_rq_map_kern(struct request_queue
21560 if (!len || !kbuf)
21561 return -EINVAL;
21562
21563- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
21564+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
21565 if (do_copy)
21566 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
21567 else
21568diff -urNp linux-3.0.4/block/blk-softirq.c linux-3.0.4/block/blk-softirq.c
21569--- linux-3.0.4/block/blk-softirq.c 2011-07-21 22:17:23.000000000 -0400
21570+++ linux-3.0.4/block/blk-softirq.c 2011-08-23 21:47:55.000000000 -0400
21571@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
21572 * Softirq action handler - move entries to local list and loop over them
21573 * while passing them to the queue registered handler.
21574 */
21575-static void blk_done_softirq(struct softirq_action *h)
21576+static void blk_done_softirq(void)
21577 {
21578 struct list_head *cpu_list, local_list;
21579
21580diff -urNp linux-3.0.4/block/bsg.c linux-3.0.4/block/bsg.c
21581--- linux-3.0.4/block/bsg.c 2011-07-21 22:17:23.000000000 -0400
21582+++ linux-3.0.4/block/bsg.c 2011-08-23 21:47:55.000000000 -0400
21583@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
21584 struct sg_io_v4 *hdr, struct bsg_device *bd,
21585 fmode_t has_write_perm)
21586 {
21587+ unsigned char tmpcmd[sizeof(rq->__cmd)];
21588+ unsigned char *cmdptr;
21589+
21590 if (hdr->request_len > BLK_MAX_CDB) {
21591 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
21592 if (!rq->cmd)
21593 return -ENOMEM;
21594- }
21595+ cmdptr = rq->cmd;
21596+ } else
21597+ cmdptr = tmpcmd;
21598
21599- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
21600+ if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
21601 hdr->request_len))
21602 return -EFAULT;
21603
21604+ if (cmdptr != rq->cmd)
21605+ memcpy(rq->cmd, cmdptr, hdr->request_len);
21606+
21607 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
21608 if (blk_verify_command(rq->cmd, has_write_perm))
21609 return -EPERM;
21610diff -urNp linux-3.0.4/block/scsi_ioctl.c linux-3.0.4/block/scsi_ioctl.c
21611--- linux-3.0.4/block/scsi_ioctl.c 2011-07-21 22:17:23.000000000 -0400
21612+++ linux-3.0.4/block/scsi_ioctl.c 2011-08-23 21:47:55.000000000 -0400
21613@@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
21614 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
21615 struct sg_io_hdr *hdr, fmode_t mode)
21616 {
21617- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
21618+ unsigned char tmpcmd[sizeof(rq->__cmd)];
21619+ unsigned char *cmdptr;
21620+
21621+ if (rq->cmd != rq->__cmd)
21622+ cmdptr = rq->cmd;
21623+ else
21624+ cmdptr = tmpcmd;
21625+
21626+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
21627 return -EFAULT;
21628+
21629+ if (cmdptr != rq->cmd)
21630+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
21631+
21632 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
21633 return -EPERM;
21634
21635@@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *
21636 int err;
21637 unsigned int in_len, out_len, bytes, opcode, cmdlen;
21638 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
21639+ unsigned char tmpcmd[sizeof(rq->__cmd)];
21640+ unsigned char *cmdptr;
21641
21642 if (!sic)
21643 return -EINVAL;
21644@@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *
21645 */
21646 err = -EFAULT;
21647 rq->cmd_len = cmdlen;
21648- if (copy_from_user(rq->cmd, sic->data, cmdlen))
21649+
21650+ if (rq->cmd != rq->__cmd)
21651+ cmdptr = rq->cmd;
21652+ else
21653+ cmdptr = tmpcmd;
21654+
21655+ if (copy_from_user(cmdptr, sic->data, cmdlen))
21656 goto error;
21657
21658+ if (rq->cmd != cmdptr)
21659+ memcpy(rq->cmd, cmdptr, cmdlen);
21660+
21661 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
21662 goto error;
21663
21664diff -urNp linux-3.0.4/crypto/cryptd.c linux-3.0.4/crypto/cryptd.c
21665--- linux-3.0.4/crypto/cryptd.c 2011-07-21 22:17:23.000000000 -0400
21666+++ linux-3.0.4/crypto/cryptd.c 2011-08-23 21:47:55.000000000 -0400
21667@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
21668
21669 struct cryptd_blkcipher_request_ctx {
21670 crypto_completion_t complete;
21671-};
21672+} __no_const;
21673
21674 struct cryptd_hash_ctx {
21675 struct crypto_shash *child;
21676@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
21677
21678 struct cryptd_aead_request_ctx {
21679 crypto_completion_t complete;
21680-};
21681+} __no_const;
21682
21683 static void cryptd_queue_worker(struct work_struct *work);
21684
21685diff -urNp linux-3.0.4/crypto/gf128mul.c linux-3.0.4/crypto/gf128mul.c
21686--- linux-3.0.4/crypto/gf128mul.c 2011-07-21 22:17:23.000000000 -0400
21687+++ linux-3.0.4/crypto/gf128mul.c 2011-08-23 21:47:55.000000000 -0400
21688@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
21689 for (i = 0; i < 7; ++i)
21690 gf128mul_x_lle(&p[i + 1], &p[i]);
21691
21692- memset(r, 0, sizeof(r));
21693+ memset(r, 0, sizeof(*r));
21694 for (i = 0;;) {
21695 u8 ch = ((u8 *)b)[15 - i];
21696
21697@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
21698 for (i = 0; i < 7; ++i)
21699 gf128mul_x_bbe(&p[i + 1], &p[i]);
21700
21701- memset(r, 0, sizeof(r));
21702+ memset(r, 0, sizeof(*r));
21703 for (i = 0;;) {
21704 u8 ch = ((u8 *)b)[i];
21705
21706diff -urNp linux-3.0.4/crypto/serpent.c linux-3.0.4/crypto/serpent.c
21707--- linux-3.0.4/crypto/serpent.c 2011-07-21 22:17:23.000000000 -0400
21708+++ linux-3.0.4/crypto/serpent.c 2011-08-23 21:48:14.000000000 -0400
21709@@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
21710 u32 r0,r1,r2,r3,r4;
21711 int i;
21712
21713+ pax_track_stack();
21714+
21715 /* Copy key, add padding */
21716
21717 for (i = 0; i < keylen; ++i)
21718diff -urNp linux-3.0.4/Documentation/dontdiff linux-3.0.4/Documentation/dontdiff
21719--- linux-3.0.4/Documentation/dontdiff 2011-07-21 22:17:23.000000000 -0400
21720+++ linux-3.0.4/Documentation/dontdiff 2011-08-23 21:47:55.000000000 -0400
21721@@ -5,6 +5,7 @@
21722 *.cis
21723 *.cpio
21724 *.csp
21725+*.dbg
21726 *.dsp
21727 *.dvi
21728 *.elf
21729@@ -48,9 +49,11 @@
21730 *.tab.h
21731 *.tex
21732 *.ver
21733+*.vim
21734 *.xml
21735 *.xz
21736 *_MODULES
21737+*_reg_safe.h
21738 *_vga16.c
21739 *~
21740 \#*#
21741@@ -70,6 +73,7 @@ Kerntypes
21742 Module.markers
21743 Module.symvers
21744 PENDING
21745+PERF*
21746 SCCS
21747 System.map*
21748 TAGS
21749@@ -98,6 +102,8 @@ bzImage*
21750 capability_names.h
21751 capflags.c
21752 classlist.h*
21753+clut_vga16.c
21754+common-cmds.h
21755 comp*.log
21756 compile.h*
21757 conf
21758@@ -126,12 +132,14 @@ fore200e_pca_fw.c*
21759 gconf
21760 gconf.glade.h
21761 gen-devlist
21762+gen-kdb_cmds.c
21763 gen_crc32table
21764 gen_init_cpio
21765 generated
21766 genheaders
21767 genksyms
21768 *_gray256.c
21769+hash
21770 hpet_example
21771 hugepage-mmap
21772 hugepage-shm
21773@@ -146,7 +154,6 @@ int32.c
21774 int4.c
21775 int8.c
21776 kallsyms
21777-kconfig
21778 keywords.c
21779 ksym.c*
21780 ksym.h*
21781@@ -154,7 +161,6 @@ kxgettext
21782 lkc_defs.h
21783 lex.c
21784 lex.*.c
21785-linux
21786 logo_*.c
21787 logo_*_clut224.c
21788 logo_*_mono.c
21789@@ -174,6 +180,7 @@ mkboot
21790 mkbugboot
21791 mkcpustr
21792 mkdep
21793+mkpiggy
21794 mkprep
21795 mkregtable
21796 mktables
21797@@ -209,6 +216,7 @@ r300_reg_safe.h
21798 r420_reg_safe.h
21799 r600_reg_safe.h
21800 recordmcount
21801+regdb.c
21802 relocs
21803 rlim_names.h
21804 rn50_reg_safe.h
21805@@ -219,6 +227,7 @@ setup
21806 setup.bin
21807 setup.elf
21808 sImage
21809+slabinfo
21810 sm_tbl*
21811 split-include
21812 syscalltab.h
21813@@ -246,7 +255,9 @@ vmlinux
21814 vmlinux-*
21815 vmlinux.aout
21816 vmlinux.bin.all
21817+vmlinux.bin.bz2
21818 vmlinux.lds
21819+vmlinux.relocs
21820 vmlinuz
21821 voffset.h
21822 vsyscall.lds
21823@@ -254,6 +265,7 @@ vsyscall_32.lds
21824 wanxlfw.inc
21825 uImage
21826 unifdef
21827+utsrelease.h
21828 wakeup.bin
21829 wakeup.elf
21830 wakeup.lds
21831diff -urNp linux-3.0.4/Documentation/kernel-parameters.txt linux-3.0.4/Documentation/kernel-parameters.txt
21832--- linux-3.0.4/Documentation/kernel-parameters.txt 2011-07-21 22:17:23.000000000 -0400
21833+++ linux-3.0.4/Documentation/kernel-parameters.txt 2011-08-23 21:47:55.000000000 -0400
21834@@ -1883,6 +1883,13 @@ bytes respectively. Such letter suffixes
21835 the specified number of seconds. This is to be used if
21836 your oopses keep scrolling off the screen.
21837
21838+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
21839+ virtualization environments that don't cope well with the
21840+ expand down segment used by UDEREF on X86-32 or the frequent
21841+ page table updates on X86-64.
21842+
21843+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
21844+
21845 pcbit= [HW,ISDN]
21846
21847 pcd. [PARIDE]
21848diff -urNp linux-3.0.4/drivers/acpi/apei/cper.c linux-3.0.4/drivers/acpi/apei/cper.c
21849--- linux-3.0.4/drivers/acpi/apei/cper.c 2011-07-21 22:17:23.000000000 -0400
21850+++ linux-3.0.4/drivers/acpi/apei/cper.c 2011-08-23 21:47:55.000000000 -0400
21851@@ -38,12 +38,12 @@
21852 */
21853 u64 cper_next_record_id(void)
21854 {
21855- static atomic64_t seq;
21856+ static atomic64_unchecked_t seq;
21857
21858- if (!atomic64_read(&seq))
21859- atomic64_set(&seq, ((u64)get_seconds()) << 32);
21860+ if (!atomic64_read_unchecked(&seq))
21861+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
21862
21863- return atomic64_inc_return(&seq);
21864+ return atomic64_inc_return_unchecked(&seq);
21865 }
21866 EXPORT_SYMBOL_GPL(cper_next_record_id);
21867
21868diff -urNp linux-3.0.4/drivers/acpi/ec_sys.c linux-3.0.4/drivers/acpi/ec_sys.c
21869--- linux-3.0.4/drivers/acpi/ec_sys.c 2011-07-21 22:17:23.000000000 -0400
21870+++ linux-3.0.4/drivers/acpi/ec_sys.c 2011-08-24 19:06:55.000000000 -0400
21871@@ -11,6 +11,7 @@
21872 #include <linux/kernel.h>
21873 #include <linux/acpi.h>
21874 #include <linux/debugfs.h>
21875+#include <asm/uaccess.h>
21876 #include "internal.h"
21877
21878 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
21879@@ -39,7 +40,7 @@ static ssize_t acpi_ec_read_io(struct fi
21880 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
21881 */
21882 unsigned int size = EC_SPACE_SIZE;
21883- u8 *data = (u8 *) buf;
21884+ u8 data;
21885 loff_t init_off = *off;
21886 int err = 0;
21887
21888@@ -52,9 +53,11 @@ static ssize_t acpi_ec_read_io(struct fi
21889 size = count;
21890
21891 while (size) {
21892- err = ec_read(*off, &data[*off - init_off]);
21893+ err = ec_read(*off, &data);
21894 if (err)
21895 return err;
21896+ if (put_user(data, &buf[*off - init_off]))
21897+ return -EFAULT;
21898 *off += 1;
21899 size--;
21900 }
21901@@ -70,7 +73,6 @@ static ssize_t acpi_ec_write_io(struct f
21902
21903 unsigned int size = count;
21904 loff_t init_off = *off;
21905- u8 *data = (u8 *) buf;
21906 int err = 0;
21907
21908 if (*off >= EC_SPACE_SIZE)
21909@@ -81,7 +83,9 @@ static ssize_t acpi_ec_write_io(struct f
21910 }
21911
21912 while (size) {
21913- u8 byte_write = data[*off - init_off];
21914+ u8 byte_write;
21915+ if (get_user(byte_write, &buf[*off - init_off]))
21916+ return -EFAULT;
21917 err = ec_write(*off, byte_write);
21918 if (err)
21919 return err;
21920diff -urNp linux-3.0.4/drivers/acpi/proc.c linux-3.0.4/drivers/acpi/proc.c
21921--- linux-3.0.4/drivers/acpi/proc.c 2011-07-21 22:17:23.000000000 -0400
21922+++ linux-3.0.4/drivers/acpi/proc.c 2011-08-23 21:47:55.000000000 -0400
21923@@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct f
21924 size_t count, loff_t * ppos)
21925 {
21926 struct list_head *node, *next;
21927- char strbuf[5];
21928- char str[5] = "";
21929- unsigned int len = count;
21930-
21931- if (len > 4)
21932- len = 4;
21933- if (len < 0)
21934- return -EFAULT;
21935+ char strbuf[5] = {0};
21936
21937- if (copy_from_user(strbuf, buffer, len))
21938+ if (count > 4)
21939+ count = 4;
21940+ if (copy_from_user(strbuf, buffer, count))
21941 return -EFAULT;
21942- strbuf[len] = '\0';
21943- sscanf(strbuf, "%s", str);
21944+ strbuf[count] = '\0';
21945
21946 mutex_lock(&acpi_device_lock);
21947 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
21948@@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct f
21949 if (!dev->wakeup.flags.valid)
21950 continue;
21951
21952- if (!strncmp(dev->pnp.bus_id, str, 4)) {
21953+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
21954 if (device_can_wakeup(&dev->dev)) {
21955 bool enable = !device_may_wakeup(&dev->dev);
21956 device_set_wakeup_enable(&dev->dev, enable);
21957diff -urNp linux-3.0.4/drivers/acpi/processor_driver.c linux-3.0.4/drivers/acpi/processor_driver.c
21958--- linux-3.0.4/drivers/acpi/processor_driver.c 2011-07-21 22:17:23.000000000 -0400
21959+++ linux-3.0.4/drivers/acpi/processor_driver.c 2011-08-23 21:47:55.000000000 -0400
21960@@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(
21961 return 0;
21962 #endif
21963
21964- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
21965+ BUG_ON(pr->id >= nr_cpu_ids);
21966
21967 /*
21968 * Buggy BIOS check
21969diff -urNp linux-3.0.4/drivers/ata/libata-core.c linux-3.0.4/drivers/ata/libata-core.c
21970--- linux-3.0.4/drivers/ata/libata-core.c 2011-07-21 22:17:23.000000000 -0400
21971+++ linux-3.0.4/drivers/ata/libata-core.c 2011-08-23 21:47:55.000000000 -0400
21972@@ -4753,7 +4753,7 @@ void ata_qc_free(struct ata_queued_cmd *
21973 struct ata_port *ap;
21974 unsigned int tag;
21975
21976- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21977+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21978 ap = qc->ap;
21979
21980 qc->flags = 0;
21981@@ -4769,7 +4769,7 @@ void __ata_qc_complete(struct ata_queued
21982 struct ata_port *ap;
21983 struct ata_link *link;
21984
21985- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21986+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21987 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
21988 ap = qc->ap;
21989 link = qc->dev->link;
21990@@ -5774,6 +5774,7 @@ static void ata_finalize_port_ops(struct
21991 return;
21992
21993 spin_lock(&lock);
21994+ pax_open_kernel();
21995
21996 for (cur = ops->inherits; cur; cur = cur->inherits) {
21997 void **inherit = (void **)cur;
21998@@ -5787,8 +5788,9 @@ static void ata_finalize_port_ops(struct
21999 if (IS_ERR(*pp))
22000 *pp = NULL;
22001
22002- ops->inherits = NULL;
22003+ *(struct ata_port_operations **)&ops->inherits = NULL;
22004
22005+ pax_close_kernel();
22006 spin_unlock(&lock);
22007 }
22008
22009diff -urNp linux-3.0.4/drivers/ata/libata-eh.c linux-3.0.4/drivers/ata/libata-eh.c
22010--- linux-3.0.4/drivers/ata/libata-eh.c 2011-07-21 22:17:23.000000000 -0400
22011+++ linux-3.0.4/drivers/ata/libata-eh.c 2011-08-23 21:48:14.000000000 -0400
22012@@ -2518,6 +2518,8 @@ void ata_eh_report(struct ata_port *ap)
22013 {
22014 struct ata_link *link;
22015
22016+ pax_track_stack();
22017+
22018 ata_for_each_link(link, ap, HOST_FIRST)
22019 ata_eh_link_report(link);
22020 }
22021diff -urNp linux-3.0.4/drivers/ata/pata_arasan_cf.c linux-3.0.4/drivers/ata/pata_arasan_cf.c
22022--- linux-3.0.4/drivers/ata/pata_arasan_cf.c 2011-07-21 22:17:23.000000000 -0400
22023+++ linux-3.0.4/drivers/ata/pata_arasan_cf.c 2011-08-23 21:47:55.000000000 -0400
22024@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(str
22025 /* Handle platform specific quirks */
22026 if (pdata->quirk) {
22027 if (pdata->quirk & CF_BROKEN_PIO) {
22028- ap->ops->set_piomode = NULL;
22029+ pax_open_kernel();
22030+ *(void **)&ap->ops->set_piomode = NULL;
22031+ pax_close_kernel();
22032 ap->pio_mask = 0;
22033 }
22034 if (pdata->quirk & CF_BROKEN_MWDMA)
22035diff -urNp linux-3.0.4/drivers/atm/adummy.c linux-3.0.4/drivers/atm/adummy.c
22036--- linux-3.0.4/drivers/atm/adummy.c 2011-07-21 22:17:23.000000000 -0400
22037+++ linux-3.0.4/drivers/atm/adummy.c 2011-08-23 21:47:55.000000000 -0400
22038@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct
22039 vcc->pop(vcc, skb);
22040 else
22041 dev_kfree_skb_any(skb);
22042- atomic_inc(&vcc->stats->tx);
22043+ atomic_inc_unchecked(&vcc->stats->tx);
22044
22045 return 0;
22046 }
22047diff -urNp linux-3.0.4/drivers/atm/ambassador.c linux-3.0.4/drivers/atm/ambassador.c
22048--- linux-3.0.4/drivers/atm/ambassador.c 2011-07-21 22:17:23.000000000 -0400
22049+++ linux-3.0.4/drivers/atm/ambassador.c 2011-08-23 21:47:55.000000000 -0400
22050@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
22051 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
22052
22053 // VC layer stats
22054- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22055+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22056
22057 // free the descriptor
22058 kfree (tx_descr);
22059@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
22060 dump_skb ("<<<", vc, skb);
22061
22062 // VC layer stats
22063- atomic_inc(&atm_vcc->stats->rx);
22064+ atomic_inc_unchecked(&atm_vcc->stats->rx);
22065 __net_timestamp(skb);
22066 // end of our responsibility
22067 atm_vcc->push (atm_vcc, skb);
22068@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
22069 } else {
22070 PRINTK (KERN_INFO, "dropped over-size frame");
22071 // should we count this?
22072- atomic_inc(&atm_vcc->stats->rx_drop);
22073+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22074 }
22075
22076 } else {
22077@@ -1342,7 +1342,7 @@ static int amb_send (struct atm_vcc * at
22078 }
22079
22080 if (check_area (skb->data, skb->len)) {
22081- atomic_inc(&atm_vcc->stats->tx_err);
22082+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
22083 return -ENOMEM; // ?
22084 }
22085
22086diff -urNp linux-3.0.4/drivers/atm/atmtcp.c linux-3.0.4/drivers/atm/atmtcp.c
22087--- linux-3.0.4/drivers/atm/atmtcp.c 2011-07-21 22:17:23.000000000 -0400
22088+++ linux-3.0.4/drivers/atm/atmtcp.c 2011-08-23 21:47:55.000000000 -0400
22089@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc
22090 if (vcc->pop) vcc->pop(vcc,skb);
22091 else dev_kfree_skb(skb);
22092 if (dev_data) return 0;
22093- atomic_inc(&vcc->stats->tx_err);
22094+ atomic_inc_unchecked(&vcc->stats->tx_err);
22095 return -ENOLINK;
22096 }
22097 size = skb->len+sizeof(struct atmtcp_hdr);
22098@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc
22099 if (!new_skb) {
22100 if (vcc->pop) vcc->pop(vcc,skb);
22101 else dev_kfree_skb(skb);
22102- atomic_inc(&vcc->stats->tx_err);
22103+ atomic_inc_unchecked(&vcc->stats->tx_err);
22104 return -ENOBUFS;
22105 }
22106 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
22107@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc
22108 if (vcc->pop) vcc->pop(vcc,skb);
22109 else dev_kfree_skb(skb);
22110 out_vcc->push(out_vcc,new_skb);
22111- atomic_inc(&vcc->stats->tx);
22112- atomic_inc(&out_vcc->stats->rx);
22113+ atomic_inc_unchecked(&vcc->stats->tx);
22114+ atomic_inc_unchecked(&out_vcc->stats->rx);
22115 return 0;
22116 }
22117
22118@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc
22119 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
22120 read_unlock(&vcc_sklist_lock);
22121 if (!out_vcc) {
22122- atomic_inc(&vcc->stats->tx_err);
22123+ atomic_inc_unchecked(&vcc->stats->tx_err);
22124 goto done;
22125 }
22126 skb_pull(skb,sizeof(struct atmtcp_hdr));
22127@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc
22128 __net_timestamp(new_skb);
22129 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
22130 out_vcc->push(out_vcc,new_skb);
22131- atomic_inc(&vcc->stats->tx);
22132- atomic_inc(&out_vcc->stats->rx);
22133+ atomic_inc_unchecked(&vcc->stats->tx);
22134+ atomic_inc_unchecked(&out_vcc->stats->rx);
22135 done:
22136 if (vcc->pop) vcc->pop(vcc,skb);
22137 else dev_kfree_skb(skb);
22138diff -urNp linux-3.0.4/drivers/atm/eni.c linux-3.0.4/drivers/atm/eni.c
22139--- linux-3.0.4/drivers/atm/eni.c 2011-07-21 22:17:23.000000000 -0400
22140+++ linux-3.0.4/drivers/atm/eni.c 2011-08-23 21:47:55.000000000 -0400
22141@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
22142 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
22143 vcc->dev->number);
22144 length = 0;
22145- atomic_inc(&vcc->stats->rx_err);
22146+ atomic_inc_unchecked(&vcc->stats->rx_err);
22147 }
22148 else {
22149 length = ATM_CELL_SIZE-1; /* no HEC */
22150@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22151 size);
22152 }
22153 eff = length = 0;
22154- atomic_inc(&vcc->stats->rx_err);
22155+ atomic_inc_unchecked(&vcc->stats->rx_err);
22156 }
22157 else {
22158 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
22159@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22160 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
22161 vcc->dev->number,vcc->vci,length,size << 2,descr);
22162 length = eff = 0;
22163- atomic_inc(&vcc->stats->rx_err);
22164+ atomic_inc_unchecked(&vcc->stats->rx_err);
22165 }
22166 }
22167 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
22168@@ -771,7 +771,7 @@ rx_dequeued++;
22169 vcc->push(vcc,skb);
22170 pushed++;
22171 }
22172- atomic_inc(&vcc->stats->rx);
22173+ atomic_inc_unchecked(&vcc->stats->rx);
22174 }
22175 wake_up(&eni_dev->rx_wait);
22176 }
22177@@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d
22178 PCI_DMA_TODEVICE);
22179 if (vcc->pop) vcc->pop(vcc,skb);
22180 else dev_kfree_skb_irq(skb);
22181- atomic_inc(&vcc->stats->tx);
22182+ atomic_inc_unchecked(&vcc->stats->tx);
22183 wake_up(&eni_dev->tx_wait);
22184 dma_complete++;
22185 }
22186diff -urNp linux-3.0.4/drivers/atm/firestream.c linux-3.0.4/drivers/atm/firestream.c
22187--- linux-3.0.4/drivers/atm/firestream.c 2011-07-21 22:17:23.000000000 -0400
22188+++ linux-3.0.4/drivers/atm/firestream.c 2011-08-23 21:47:55.000000000 -0400
22189@@ -749,7 +749,7 @@ static void process_txdone_queue (struct
22190 }
22191 }
22192
22193- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22194+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22195
22196 fs_dprintk (FS_DEBUG_TXMEM, "i");
22197 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
22198@@ -816,7 +816,7 @@ static void process_incoming (struct fs_
22199 #endif
22200 skb_put (skb, qe->p1 & 0xffff);
22201 ATM_SKB(skb)->vcc = atm_vcc;
22202- atomic_inc(&atm_vcc->stats->rx);
22203+ atomic_inc_unchecked(&atm_vcc->stats->rx);
22204 __net_timestamp(skb);
22205 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
22206 atm_vcc->push (atm_vcc, skb);
22207@@ -837,12 +837,12 @@ static void process_incoming (struct fs_
22208 kfree (pe);
22209 }
22210 if (atm_vcc)
22211- atomic_inc(&atm_vcc->stats->rx_drop);
22212+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22213 break;
22214 case 0x1f: /* Reassembly abort: no buffers. */
22215 /* Silently increment error counter. */
22216 if (atm_vcc)
22217- atomic_inc(&atm_vcc->stats->rx_drop);
22218+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22219 break;
22220 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
22221 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
22222diff -urNp linux-3.0.4/drivers/atm/fore200e.c linux-3.0.4/drivers/atm/fore200e.c
22223--- linux-3.0.4/drivers/atm/fore200e.c 2011-07-21 22:17:23.000000000 -0400
22224+++ linux-3.0.4/drivers/atm/fore200e.c 2011-08-23 21:47:55.000000000 -0400
22225@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200
22226 #endif
22227 /* check error condition */
22228 if (*entry->status & STATUS_ERROR)
22229- atomic_inc(&vcc->stats->tx_err);
22230+ atomic_inc_unchecked(&vcc->stats->tx_err);
22231 else
22232- atomic_inc(&vcc->stats->tx);
22233+ atomic_inc_unchecked(&vcc->stats->tx);
22234 }
22235 }
22236
22237@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2
22238 if (skb == NULL) {
22239 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
22240
22241- atomic_inc(&vcc->stats->rx_drop);
22242+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22243 return -ENOMEM;
22244 }
22245
22246@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2
22247
22248 dev_kfree_skb_any(skb);
22249
22250- atomic_inc(&vcc->stats->rx_drop);
22251+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22252 return -ENOMEM;
22253 }
22254
22255 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22256
22257 vcc->push(vcc, skb);
22258- atomic_inc(&vcc->stats->rx);
22259+ atomic_inc_unchecked(&vcc->stats->rx);
22260
22261 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22262
22263@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200
22264 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
22265 fore200e->atm_dev->number,
22266 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
22267- atomic_inc(&vcc->stats->rx_err);
22268+ atomic_inc_unchecked(&vcc->stats->rx_err);
22269 }
22270 }
22271
22272@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc
22273 goto retry_here;
22274 }
22275
22276- atomic_inc(&vcc->stats->tx_err);
22277+ atomic_inc_unchecked(&vcc->stats->tx_err);
22278
22279 fore200e->tx_sat++;
22280 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
22281diff -urNp linux-3.0.4/drivers/atm/he.c linux-3.0.4/drivers/atm/he.c
22282--- linux-3.0.4/drivers/atm/he.c 2011-07-21 22:17:23.000000000 -0400
22283+++ linux-3.0.4/drivers/atm/he.c 2011-08-23 21:47:55.000000000 -0400
22284@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22285
22286 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
22287 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
22288- atomic_inc(&vcc->stats->rx_drop);
22289+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22290 goto return_host_buffers;
22291 }
22292
22293@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22294 RBRQ_LEN_ERR(he_dev->rbrq_head)
22295 ? "LEN_ERR" : "",
22296 vcc->vpi, vcc->vci);
22297- atomic_inc(&vcc->stats->rx_err);
22298+ atomic_inc_unchecked(&vcc->stats->rx_err);
22299 goto return_host_buffers;
22300 }
22301
22302@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22303 vcc->push(vcc, skb);
22304 spin_lock(&he_dev->global_lock);
22305
22306- atomic_inc(&vcc->stats->rx);
22307+ atomic_inc_unchecked(&vcc->stats->rx);
22308
22309 return_host_buffers:
22310 ++pdus_assembled;
22311@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
22312 tpd->vcc->pop(tpd->vcc, tpd->skb);
22313 else
22314 dev_kfree_skb_any(tpd->skb);
22315- atomic_inc(&tpd->vcc->stats->tx_err);
22316+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
22317 }
22318 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
22319 return;
22320@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22321 vcc->pop(vcc, skb);
22322 else
22323 dev_kfree_skb_any(skb);
22324- atomic_inc(&vcc->stats->tx_err);
22325+ atomic_inc_unchecked(&vcc->stats->tx_err);
22326 return -EINVAL;
22327 }
22328
22329@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22330 vcc->pop(vcc, skb);
22331 else
22332 dev_kfree_skb_any(skb);
22333- atomic_inc(&vcc->stats->tx_err);
22334+ atomic_inc_unchecked(&vcc->stats->tx_err);
22335 return -EINVAL;
22336 }
22337 #endif
22338@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22339 vcc->pop(vcc, skb);
22340 else
22341 dev_kfree_skb_any(skb);
22342- atomic_inc(&vcc->stats->tx_err);
22343+ atomic_inc_unchecked(&vcc->stats->tx_err);
22344 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22345 return -ENOMEM;
22346 }
22347@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22348 vcc->pop(vcc, skb);
22349 else
22350 dev_kfree_skb_any(skb);
22351- atomic_inc(&vcc->stats->tx_err);
22352+ atomic_inc_unchecked(&vcc->stats->tx_err);
22353 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22354 return -ENOMEM;
22355 }
22356@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22357 __enqueue_tpd(he_dev, tpd, cid);
22358 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22359
22360- atomic_inc(&vcc->stats->tx);
22361+ atomic_inc_unchecked(&vcc->stats->tx);
22362
22363 return 0;
22364 }
22365diff -urNp linux-3.0.4/drivers/atm/horizon.c linux-3.0.4/drivers/atm/horizon.c
22366--- linux-3.0.4/drivers/atm/horizon.c 2011-07-21 22:17:23.000000000 -0400
22367+++ linux-3.0.4/drivers/atm/horizon.c 2011-08-23 21:47:55.000000000 -0400
22368@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev,
22369 {
22370 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
22371 // VC layer stats
22372- atomic_inc(&vcc->stats->rx);
22373+ atomic_inc_unchecked(&vcc->stats->rx);
22374 __net_timestamp(skb);
22375 // end of our responsibility
22376 vcc->push (vcc, skb);
22377@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const
22378 dev->tx_iovec = NULL;
22379
22380 // VC layer stats
22381- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22382+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22383
22384 // free the skb
22385 hrz_kfree_skb (skb);
22386diff -urNp linux-3.0.4/drivers/atm/idt77252.c linux-3.0.4/drivers/atm/idt77252.c
22387--- linux-3.0.4/drivers/atm/idt77252.c 2011-07-21 22:17:23.000000000 -0400
22388+++ linux-3.0.4/drivers/atm/idt77252.c 2011-08-23 21:47:55.000000000 -0400
22389@@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, str
22390 else
22391 dev_kfree_skb(skb);
22392
22393- atomic_inc(&vcc->stats->tx);
22394+ atomic_inc_unchecked(&vcc->stats->tx);
22395 }
22396
22397 atomic_dec(&scq->used);
22398@@ -1074,13 +1074,13 @@ dequeue_rx(struct idt77252_dev *card, st
22399 if ((sb = dev_alloc_skb(64)) == NULL) {
22400 printk("%s: Can't allocate buffers for aal0.\n",
22401 card->name);
22402- atomic_add(i, &vcc->stats->rx_drop);
22403+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
22404 break;
22405 }
22406 if (!atm_charge(vcc, sb->truesize)) {
22407 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
22408 card->name);
22409- atomic_add(i - 1, &vcc->stats->rx_drop);
22410+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
22411 dev_kfree_skb(sb);
22412 break;
22413 }
22414@@ -1097,7 +1097,7 @@ dequeue_rx(struct idt77252_dev *card, st
22415 ATM_SKB(sb)->vcc = vcc;
22416 __net_timestamp(sb);
22417 vcc->push(vcc, sb);
22418- atomic_inc(&vcc->stats->rx);
22419+ atomic_inc_unchecked(&vcc->stats->rx);
22420
22421 cell += ATM_CELL_PAYLOAD;
22422 }
22423@@ -1134,13 +1134,13 @@ dequeue_rx(struct idt77252_dev *card, st
22424 "(CDC: %08x)\n",
22425 card->name, len, rpp->len, readl(SAR_REG_CDC));
22426 recycle_rx_pool_skb(card, rpp);
22427- atomic_inc(&vcc->stats->rx_err);
22428+ atomic_inc_unchecked(&vcc->stats->rx_err);
22429 return;
22430 }
22431 if (stat & SAR_RSQE_CRC) {
22432 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
22433 recycle_rx_pool_skb(card, rpp);
22434- atomic_inc(&vcc->stats->rx_err);
22435+ atomic_inc_unchecked(&vcc->stats->rx_err);
22436 return;
22437 }
22438 if (skb_queue_len(&rpp->queue) > 1) {
22439@@ -1151,7 +1151,7 @@ dequeue_rx(struct idt77252_dev *card, st
22440 RXPRINTK("%s: Can't alloc RX skb.\n",
22441 card->name);
22442 recycle_rx_pool_skb(card, rpp);
22443- atomic_inc(&vcc->stats->rx_err);
22444+ atomic_inc_unchecked(&vcc->stats->rx_err);
22445 return;
22446 }
22447 if (!atm_charge(vcc, skb->truesize)) {
22448@@ -1170,7 +1170,7 @@ dequeue_rx(struct idt77252_dev *card, st
22449 __net_timestamp(skb);
22450
22451 vcc->push(vcc, skb);
22452- atomic_inc(&vcc->stats->rx);
22453+ atomic_inc_unchecked(&vcc->stats->rx);
22454
22455 return;
22456 }
22457@@ -1192,7 +1192,7 @@ dequeue_rx(struct idt77252_dev *card, st
22458 __net_timestamp(skb);
22459
22460 vcc->push(vcc, skb);
22461- atomic_inc(&vcc->stats->rx);
22462+ atomic_inc_unchecked(&vcc->stats->rx);
22463
22464 if (skb->truesize > SAR_FB_SIZE_3)
22465 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
22466@@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
22467 if (vcc->qos.aal != ATM_AAL0) {
22468 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
22469 card->name, vpi, vci);
22470- atomic_inc(&vcc->stats->rx_drop);
22471+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22472 goto drop;
22473 }
22474
22475 if ((sb = dev_alloc_skb(64)) == NULL) {
22476 printk("%s: Can't allocate buffers for AAL0.\n",
22477 card->name);
22478- atomic_inc(&vcc->stats->rx_err);
22479+ atomic_inc_unchecked(&vcc->stats->rx_err);
22480 goto drop;
22481 }
22482
22483@@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
22484 ATM_SKB(sb)->vcc = vcc;
22485 __net_timestamp(sb);
22486 vcc->push(vcc, sb);
22487- atomic_inc(&vcc->stats->rx);
22488+ atomic_inc_unchecked(&vcc->stats->rx);
22489
22490 drop:
22491 skb_pull(queue, 64);
22492@@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22493
22494 if (vc == NULL) {
22495 printk("%s: NULL connection in send().\n", card->name);
22496- atomic_inc(&vcc->stats->tx_err);
22497+ atomic_inc_unchecked(&vcc->stats->tx_err);
22498 dev_kfree_skb(skb);
22499 return -EINVAL;
22500 }
22501 if (!test_bit(VCF_TX, &vc->flags)) {
22502 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
22503- atomic_inc(&vcc->stats->tx_err);
22504+ atomic_inc_unchecked(&vcc->stats->tx_err);
22505 dev_kfree_skb(skb);
22506 return -EINVAL;
22507 }
22508@@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22509 break;
22510 default:
22511 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
22512- atomic_inc(&vcc->stats->tx_err);
22513+ atomic_inc_unchecked(&vcc->stats->tx_err);
22514 dev_kfree_skb(skb);
22515 return -EINVAL;
22516 }
22517
22518 if (skb_shinfo(skb)->nr_frags != 0) {
22519 printk("%s: No scatter-gather yet.\n", card->name);
22520- atomic_inc(&vcc->stats->tx_err);
22521+ atomic_inc_unchecked(&vcc->stats->tx_err);
22522 dev_kfree_skb(skb);
22523 return -EINVAL;
22524 }
22525@@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22526
22527 err = queue_skb(card, vc, skb, oam);
22528 if (err) {
22529- atomic_inc(&vcc->stats->tx_err);
22530+ atomic_inc_unchecked(&vcc->stats->tx_err);
22531 dev_kfree_skb(skb);
22532 return err;
22533 }
22534@@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
22535 skb = dev_alloc_skb(64);
22536 if (!skb) {
22537 printk("%s: Out of memory in send_oam().\n", card->name);
22538- atomic_inc(&vcc->stats->tx_err);
22539+ atomic_inc_unchecked(&vcc->stats->tx_err);
22540 return -ENOMEM;
22541 }
22542 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
22543diff -urNp linux-3.0.4/drivers/atm/iphase.c linux-3.0.4/drivers/atm/iphase.c
22544--- linux-3.0.4/drivers/atm/iphase.c 2011-07-21 22:17:23.000000000 -0400
22545+++ linux-3.0.4/drivers/atm/iphase.c 2011-08-23 21:47:55.000000000 -0400
22546@@ -1120,7 +1120,7 @@ static int rx_pkt(struct atm_dev *dev)
22547 status = (u_short) (buf_desc_ptr->desc_mode);
22548 if (status & (RX_CER | RX_PTE | RX_OFL))
22549 {
22550- atomic_inc(&vcc->stats->rx_err);
22551+ atomic_inc_unchecked(&vcc->stats->rx_err);
22552 IF_ERR(printk("IA: bad packet, dropping it");)
22553 if (status & RX_CER) {
22554 IF_ERR(printk(" cause: packet CRC error\n");)
22555@@ -1143,7 +1143,7 @@ static int rx_pkt(struct atm_dev *dev)
22556 len = dma_addr - buf_addr;
22557 if (len > iadev->rx_buf_sz) {
22558 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
22559- atomic_inc(&vcc->stats->rx_err);
22560+ atomic_inc_unchecked(&vcc->stats->rx_err);
22561 goto out_free_desc;
22562 }
22563
22564@@ -1293,7 +1293,7 @@ static void rx_dle_intr(struct atm_dev *
22565 ia_vcc = INPH_IA_VCC(vcc);
22566 if (ia_vcc == NULL)
22567 {
22568- atomic_inc(&vcc->stats->rx_err);
22569+ atomic_inc_unchecked(&vcc->stats->rx_err);
22570 dev_kfree_skb_any(skb);
22571 atm_return(vcc, atm_guess_pdu2truesize(len));
22572 goto INCR_DLE;
22573@@ -1305,7 +1305,7 @@ static void rx_dle_intr(struct atm_dev *
22574 if ((length > iadev->rx_buf_sz) || (length >
22575 (skb->len - sizeof(struct cpcs_trailer))))
22576 {
22577- atomic_inc(&vcc->stats->rx_err);
22578+ atomic_inc_unchecked(&vcc->stats->rx_err);
22579 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
22580 length, skb->len);)
22581 dev_kfree_skb_any(skb);
22582@@ -1321,7 +1321,7 @@ static void rx_dle_intr(struct atm_dev *
22583
22584 IF_RX(printk("rx_dle_intr: skb push");)
22585 vcc->push(vcc,skb);
22586- atomic_inc(&vcc->stats->rx);
22587+ atomic_inc_unchecked(&vcc->stats->rx);
22588 iadev->rx_pkt_cnt++;
22589 }
22590 INCR_DLE:
22591@@ -2801,15 +2801,15 @@ static int ia_ioctl(struct atm_dev *dev,
22592 {
22593 struct k_sonet_stats *stats;
22594 stats = &PRIV(_ia_dev[board])->sonet_stats;
22595- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
22596- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
22597- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
22598- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
22599- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
22600- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
22601- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
22602- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
22603- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
22604+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
22605+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
22606+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
22607+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
22608+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
22609+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
22610+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
22611+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
22612+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
22613 }
22614 ia_cmds.status = 0;
22615 break;
22616@@ -2914,7 +2914,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
22617 if ((desc == 0) || (desc > iadev->num_tx_desc))
22618 {
22619 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
22620- atomic_inc(&vcc->stats->tx);
22621+ atomic_inc_unchecked(&vcc->stats->tx);
22622 if (vcc->pop)
22623 vcc->pop(vcc, skb);
22624 else
22625@@ -3019,14 +3019,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
22626 ATM_DESC(skb) = vcc->vci;
22627 skb_queue_tail(&iadev->tx_dma_q, skb);
22628
22629- atomic_inc(&vcc->stats->tx);
22630+ atomic_inc_unchecked(&vcc->stats->tx);
22631 iadev->tx_pkt_cnt++;
22632 /* Increment transaction counter */
22633 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
22634
22635 #if 0
22636 /* add flow control logic */
22637- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
22638+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
22639 if (iavcc->vc_desc_cnt > 10) {
22640 vcc->tx_quota = vcc->tx_quota * 3 / 4;
22641 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
22642diff -urNp linux-3.0.4/drivers/atm/lanai.c linux-3.0.4/drivers/atm/lanai.c
22643--- linux-3.0.4/drivers/atm/lanai.c 2011-07-21 22:17:23.000000000 -0400
22644+++ linux-3.0.4/drivers/atm/lanai.c 2011-08-23 21:47:55.000000000 -0400
22645@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
22646 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
22647 lanai_endtx(lanai, lvcc);
22648 lanai_free_skb(lvcc->tx.atmvcc, skb);
22649- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
22650+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
22651 }
22652
22653 /* Try to fill the buffer - don't call unless there is backlog */
22654@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
22655 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
22656 __net_timestamp(skb);
22657 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
22658- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
22659+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
22660 out:
22661 lvcc->rx.buf.ptr = end;
22662 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
22663@@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d
22664 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
22665 "vcc %d\n", lanai->number, (unsigned int) s, vci);
22666 lanai->stats.service_rxnotaal5++;
22667- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22668+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22669 return 0;
22670 }
22671 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
22672@@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d
22673 int bytes;
22674 read_unlock(&vcc_sklist_lock);
22675 DPRINTK("got trashed rx pdu on vci %d\n", vci);
22676- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22677+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22678 lvcc->stats.x.aal5.service_trash++;
22679 bytes = (SERVICE_GET_END(s) * 16) -
22680 (((unsigned long) lvcc->rx.buf.ptr) -
22681@@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d
22682 }
22683 if (s & SERVICE_STREAM) {
22684 read_unlock(&vcc_sklist_lock);
22685- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22686+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22687 lvcc->stats.x.aal5.service_stream++;
22688 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
22689 "PDU on VCI %d!\n", lanai->number, vci);
22690@@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d
22691 return 0;
22692 }
22693 DPRINTK("got rx crc error on vci %d\n", vci);
22694- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22695+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22696 lvcc->stats.x.aal5.service_rxcrc++;
22697 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
22698 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
22699diff -urNp linux-3.0.4/drivers/atm/nicstar.c linux-3.0.4/drivers/atm/nicstar.c
22700--- linux-3.0.4/drivers/atm/nicstar.c 2011-07-21 22:17:23.000000000 -0400
22701+++ linux-3.0.4/drivers/atm/nicstar.c 2011-08-23 21:47:55.000000000 -0400
22702@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc,
22703 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
22704 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
22705 card->index);
22706- atomic_inc(&vcc->stats->tx_err);
22707+ atomic_inc_unchecked(&vcc->stats->tx_err);
22708 dev_kfree_skb_any(skb);
22709 return -EINVAL;
22710 }
22711@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc,
22712 if (!vc->tx) {
22713 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
22714 card->index);
22715- atomic_inc(&vcc->stats->tx_err);
22716+ atomic_inc_unchecked(&vcc->stats->tx_err);
22717 dev_kfree_skb_any(skb);
22718 return -EINVAL;
22719 }
22720@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc,
22721 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
22722 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
22723 card->index);
22724- atomic_inc(&vcc->stats->tx_err);
22725+ atomic_inc_unchecked(&vcc->stats->tx_err);
22726 dev_kfree_skb_any(skb);
22727 return -EINVAL;
22728 }
22729
22730 if (skb_shinfo(skb)->nr_frags != 0) {
22731 printk("nicstar%d: No scatter-gather yet.\n", card->index);
22732- atomic_inc(&vcc->stats->tx_err);
22733+ atomic_inc_unchecked(&vcc->stats->tx_err);
22734 dev_kfree_skb_any(skb);
22735 return -EINVAL;
22736 }
22737@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc,
22738 }
22739
22740 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
22741- atomic_inc(&vcc->stats->tx_err);
22742+ atomic_inc_unchecked(&vcc->stats->tx_err);
22743 dev_kfree_skb_any(skb);
22744 return -EIO;
22745 }
22746- atomic_inc(&vcc->stats->tx);
22747+ atomic_inc_unchecked(&vcc->stats->tx);
22748
22749 return 0;
22750 }
22751@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns
22752 printk
22753 ("nicstar%d: Can't allocate buffers for aal0.\n",
22754 card->index);
22755- atomic_add(i, &vcc->stats->rx_drop);
22756+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
22757 break;
22758 }
22759 if (!atm_charge(vcc, sb->truesize)) {
22760 RXPRINTK
22761 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
22762 card->index);
22763- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
22764+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
22765 dev_kfree_skb_any(sb);
22766 break;
22767 }
22768@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns
22769 ATM_SKB(sb)->vcc = vcc;
22770 __net_timestamp(sb);
22771 vcc->push(vcc, sb);
22772- atomic_inc(&vcc->stats->rx);
22773+ atomic_inc_unchecked(&vcc->stats->rx);
22774 cell += ATM_CELL_PAYLOAD;
22775 }
22776
22777@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns
22778 if (iovb == NULL) {
22779 printk("nicstar%d: Out of iovec buffers.\n",
22780 card->index);
22781- atomic_inc(&vcc->stats->rx_drop);
22782+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22783 recycle_rx_buf(card, skb);
22784 return;
22785 }
22786@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns
22787 small or large buffer itself. */
22788 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
22789 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
22790- atomic_inc(&vcc->stats->rx_err);
22791+ atomic_inc_unchecked(&vcc->stats->rx_err);
22792 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22793 NS_MAX_IOVECS);
22794 NS_PRV_IOVCNT(iovb) = 0;
22795@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns
22796 ("nicstar%d: Expected a small buffer, and this is not one.\n",
22797 card->index);
22798 which_list(card, skb);
22799- atomic_inc(&vcc->stats->rx_err);
22800+ atomic_inc_unchecked(&vcc->stats->rx_err);
22801 recycle_rx_buf(card, skb);
22802 vc->rx_iov = NULL;
22803 recycle_iov_buf(card, iovb);
22804@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns
22805 ("nicstar%d: Expected a large buffer, and this is not one.\n",
22806 card->index);
22807 which_list(card, skb);
22808- atomic_inc(&vcc->stats->rx_err);
22809+ atomic_inc_unchecked(&vcc->stats->rx_err);
22810 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22811 NS_PRV_IOVCNT(iovb));
22812 vc->rx_iov = NULL;
22813@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns
22814 printk(" - PDU size mismatch.\n");
22815 else
22816 printk(".\n");
22817- atomic_inc(&vcc->stats->rx_err);
22818+ atomic_inc_unchecked(&vcc->stats->rx_err);
22819 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22820 NS_PRV_IOVCNT(iovb));
22821 vc->rx_iov = NULL;
22822@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns
22823 /* skb points to a small buffer */
22824 if (!atm_charge(vcc, skb->truesize)) {
22825 push_rxbufs(card, skb);
22826- atomic_inc(&vcc->stats->rx_drop);
22827+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22828 } else {
22829 skb_put(skb, len);
22830 dequeue_sm_buf(card, skb);
22831@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns
22832 ATM_SKB(skb)->vcc = vcc;
22833 __net_timestamp(skb);
22834 vcc->push(vcc, skb);
22835- atomic_inc(&vcc->stats->rx);
22836+ atomic_inc_unchecked(&vcc->stats->rx);
22837 }
22838 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
22839 struct sk_buff *sb;
22840@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns
22841 if (len <= NS_SMBUFSIZE) {
22842 if (!atm_charge(vcc, sb->truesize)) {
22843 push_rxbufs(card, sb);
22844- atomic_inc(&vcc->stats->rx_drop);
22845+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22846 } else {
22847 skb_put(sb, len);
22848 dequeue_sm_buf(card, sb);
22849@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns
22850 ATM_SKB(sb)->vcc = vcc;
22851 __net_timestamp(sb);
22852 vcc->push(vcc, sb);
22853- atomic_inc(&vcc->stats->rx);
22854+ atomic_inc_unchecked(&vcc->stats->rx);
22855 }
22856
22857 push_rxbufs(card, skb);
22858@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns
22859
22860 if (!atm_charge(vcc, skb->truesize)) {
22861 push_rxbufs(card, skb);
22862- atomic_inc(&vcc->stats->rx_drop);
22863+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22864 } else {
22865 dequeue_lg_buf(card, skb);
22866 #ifdef NS_USE_DESTRUCTORS
22867@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns
22868 ATM_SKB(skb)->vcc = vcc;
22869 __net_timestamp(skb);
22870 vcc->push(vcc, skb);
22871- atomic_inc(&vcc->stats->rx);
22872+ atomic_inc_unchecked(&vcc->stats->rx);
22873 }
22874
22875 push_rxbufs(card, sb);
22876@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns
22877 printk
22878 ("nicstar%d: Out of huge buffers.\n",
22879 card->index);
22880- atomic_inc(&vcc->stats->rx_drop);
22881+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22882 recycle_iovec_rx_bufs(card,
22883 (struct iovec *)
22884 iovb->data,
22885@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns
22886 card->hbpool.count++;
22887 } else
22888 dev_kfree_skb_any(hb);
22889- atomic_inc(&vcc->stats->rx_drop);
22890+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22891 } else {
22892 /* Copy the small buffer to the huge buffer */
22893 sb = (struct sk_buff *)iov->iov_base;
22894@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns
22895 #endif /* NS_USE_DESTRUCTORS */
22896 __net_timestamp(hb);
22897 vcc->push(vcc, hb);
22898- atomic_inc(&vcc->stats->rx);
22899+ atomic_inc_unchecked(&vcc->stats->rx);
22900 }
22901 }
22902
22903diff -urNp linux-3.0.4/drivers/atm/solos-pci.c linux-3.0.4/drivers/atm/solos-pci.c
22904--- linux-3.0.4/drivers/atm/solos-pci.c 2011-07-21 22:17:23.000000000 -0400
22905+++ linux-3.0.4/drivers/atm/solos-pci.c 2011-08-23 21:48:14.000000000 -0400
22906@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
22907 }
22908 atm_charge(vcc, skb->truesize);
22909 vcc->push(vcc, skb);
22910- atomic_inc(&vcc->stats->rx);
22911+ atomic_inc_unchecked(&vcc->stats->rx);
22912 break;
22913
22914 case PKT_STATUS:
22915@@ -899,6 +899,8 @@ static int print_buffer(struct sk_buff *
22916 char msg[500];
22917 char item[10];
22918
22919+ pax_track_stack();
22920+
22921 len = buf->len;
22922 for (i = 0; i < len; i++){
22923 if(i % 8 == 0)
22924@@ -1008,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_car
22925 vcc = SKB_CB(oldskb)->vcc;
22926
22927 if (vcc) {
22928- atomic_inc(&vcc->stats->tx);
22929+ atomic_inc_unchecked(&vcc->stats->tx);
22930 solos_pop(vcc, oldskb);
22931 } else
22932 dev_kfree_skb_irq(oldskb);
22933diff -urNp linux-3.0.4/drivers/atm/suni.c linux-3.0.4/drivers/atm/suni.c
22934--- linux-3.0.4/drivers/atm/suni.c 2011-07-21 22:17:23.000000000 -0400
22935+++ linux-3.0.4/drivers/atm/suni.c 2011-08-23 21:47:55.000000000 -0400
22936@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
22937
22938
22939 #define ADD_LIMITED(s,v) \
22940- atomic_add((v),&stats->s); \
22941- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
22942+ atomic_add_unchecked((v),&stats->s); \
22943+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
22944
22945
22946 static void suni_hz(unsigned long from_timer)
22947diff -urNp linux-3.0.4/drivers/atm/uPD98402.c linux-3.0.4/drivers/atm/uPD98402.c
22948--- linux-3.0.4/drivers/atm/uPD98402.c 2011-07-21 22:17:23.000000000 -0400
22949+++ linux-3.0.4/drivers/atm/uPD98402.c 2011-08-23 21:47:55.000000000 -0400
22950@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
22951 struct sonet_stats tmp;
22952 int error = 0;
22953
22954- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
22955+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
22956 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
22957 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
22958 if (zero && !error) {
22959@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
22960
22961
22962 #define ADD_LIMITED(s,v) \
22963- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
22964- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
22965- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
22966+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
22967+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
22968+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
22969
22970
22971 static void stat_event(struct atm_dev *dev)
22972@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
22973 if (reason & uPD98402_INT_PFM) stat_event(dev);
22974 if (reason & uPD98402_INT_PCO) {
22975 (void) GET(PCOCR); /* clear interrupt cause */
22976- atomic_add(GET(HECCT),
22977+ atomic_add_unchecked(GET(HECCT),
22978 &PRIV(dev)->sonet_stats.uncorr_hcs);
22979 }
22980 if ((reason & uPD98402_INT_RFO) &&
22981@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
22982 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
22983 uPD98402_INT_LOS),PIMR); /* enable them */
22984 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
22985- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
22986- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
22987- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
22988+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
22989+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
22990+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
22991 return 0;
22992 }
22993
22994diff -urNp linux-3.0.4/drivers/atm/zatm.c linux-3.0.4/drivers/atm/zatm.c
22995--- linux-3.0.4/drivers/atm/zatm.c 2011-07-21 22:17:23.000000000 -0400
22996+++ linux-3.0.4/drivers/atm/zatm.c 2011-08-23 21:47:55.000000000 -0400
22997@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
22998 }
22999 if (!size) {
23000 dev_kfree_skb_irq(skb);
23001- if (vcc) atomic_inc(&vcc->stats->rx_err);
23002+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
23003 continue;
23004 }
23005 if (!atm_charge(vcc,skb->truesize)) {
23006@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23007 skb->len = size;
23008 ATM_SKB(skb)->vcc = vcc;
23009 vcc->push(vcc,skb);
23010- atomic_inc(&vcc->stats->rx);
23011+ atomic_inc_unchecked(&vcc->stats->rx);
23012 }
23013 zout(pos & 0xffff,MTA(mbx));
23014 #if 0 /* probably a stupid idea */
23015@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
23016 skb_queue_head(&zatm_vcc->backlog,skb);
23017 break;
23018 }
23019- atomic_inc(&vcc->stats->tx);
23020+ atomic_inc_unchecked(&vcc->stats->tx);
23021 wake_up(&zatm_vcc->tx_wait);
23022 }
23023
23024diff -urNp linux-3.0.4/drivers/base/power/wakeup.c linux-3.0.4/drivers/base/power/wakeup.c
23025--- linux-3.0.4/drivers/base/power/wakeup.c 2011-07-21 22:17:23.000000000 -0400
23026+++ linux-3.0.4/drivers/base/power/wakeup.c 2011-08-23 21:47:55.000000000 -0400
23027@@ -29,14 +29,14 @@ bool events_check_enabled;
23028 * They need to be modified together atomically, so it's better to use one
23029 * atomic variable to hold them both.
23030 */
23031-static atomic_t combined_event_count = ATOMIC_INIT(0);
23032+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
23033
23034 #define IN_PROGRESS_BITS (sizeof(int) * 4)
23035 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
23036
23037 static void split_counters(unsigned int *cnt, unsigned int *inpr)
23038 {
23039- unsigned int comb = atomic_read(&combined_event_count);
23040+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
23041
23042 *cnt = (comb >> IN_PROGRESS_BITS);
23043 *inpr = comb & MAX_IN_PROGRESS;
23044@@ -350,7 +350,7 @@ static void wakeup_source_activate(struc
23045 ws->last_time = ktime_get();
23046
23047 /* Increment the counter of events in progress. */
23048- atomic_inc(&combined_event_count);
23049+ atomic_inc_unchecked(&combined_event_count);
23050 }
23051
23052 /**
23053@@ -440,7 +440,7 @@ static void wakeup_source_deactivate(str
23054 * Increment the counter of registered wakeup events and decrement the
23055 * couter of wakeup events in progress simultaneously.
23056 */
23057- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
23058+ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
23059 }
23060
23061 /**
23062diff -urNp linux-3.0.4/drivers/block/cciss.c linux-3.0.4/drivers/block/cciss.c
23063--- linux-3.0.4/drivers/block/cciss.c 2011-07-21 22:17:23.000000000 -0400
23064+++ linux-3.0.4/drivers/block/cciss.c 2011-08-23 21:48:14.000000000 -0400
23065@@ -1179,6 +1179,8 @@ static int cciss_ioctl32_passthru(struct
23066 int err;
23067 u32 cp;
23068
23069+ memset(&arg64, 0, sizeof(arg64));
23070+
23071 err = 0;
23072 err |=
23073 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
23074@@ -2986,7 +2988,7 @@ static void start_io(ctlr_info_t *h)
23075 while (!list_empty(&h->reqQ)) {
23076 c = list_entry(h->reqQ.next, CommandList_struct, list);
23077 /* can't do anything if fifo is full */
23078- if ((h->access.fifo_full(h))) {
23079+ if ((h->access->fifo_full(h))) {
23080 dev_warn(&h->pdev->dev, "fifo full\n");
23081 break;
23082 }
23083@@ -2996,7 +2998,7 @@ static void start_io(ctlr_info_t *h)
23084 h->Qdepth--;
23085
23086 /* Tell the controller execute command */
23087- h->access.submit_command(h, c);
23088+ h->access->submit_command(h, c);
23089
23090 /* Put job onto the completed Q */
23091 addQ(&h->cmpQ, c);
23092@@ -3422,17 +3424,17 @@ startio:
23093
23094 static inline unsigned long get_next_completion(ctlr_info_t *h)
23095 {
23096- return h->access.command_completed(h);
23097+ return h->access->command_completed(h);
23098 }
23099
23100 static inline int interrupt_pending(ctlr_info_t *h)
23101 {
23102- return h->access.intr_pending(h);
23103+ return h->access->intr_pending(h);
23104 }
23105
23106 static inline long interrupt_not_for_us(ctlr_info_t *h)
23107 {
23108- return ((h->access.intr_pending(h) == 0) ||
23109+ return ((h->access->intr_pending(h) == 0) ||
23110 (h->interrupts_enabled == 0));
23111 }
23112
23113@@ -3465,7 +3467,7 @@ static inline u32 next_command(ctlr_info
23114 u32 a;
23115
23116 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
23117- return h->access.command_completed(h);
23118+ return h->access->command_completed(h);
23119
23120 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
23121 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
23122@@ -4020,7 +4022,7 @@ static void __devinit cciss_put_controll
23123 trans_support & CFGTBL_Trans_use_short_tags);
23124
23125 /* Change the access methods to the performant access methods */
23126- h->access = SA5_performant_access;
23127+ h->access = &SA5_performant_access;
23128 h->transMethod = CFGTBL_Trans_Performant;
23129
23130 return;
23131@@ -4292,7 +4294,7 @@ static int __devinit cciss_pci_init(ctlr
23132 if (prod_index < 0)
23133 return -ENODEV;
23134 h->product_name = products[prod_index].product_name;
23135- h->access = *(products[prod_index].access);
23136+ h->access = products[prod_index].access;
23137
23138 if (cciss_board_disabled(h)) {
23139 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
23140@@ -5002,7 +5004,7 @@ reinit_after_soft_reset:
23141 }
23142
23143 /* make sure the board interrupts are off */
23144- h->access.set_intr_mask(h, CCISS_INTR_OFF);
23145+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
23146 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
23147 if (rc)
23148 goto clean2;
23149@@ -5054,7 +5056,7 @@ reinit_after_soft_reset:
23150 * fake ones to scoop up any residual completions.
23151 */
23152 spin_lock_irqsave(&h->lock, flags);
23153- h->access.set_intr_mask(h, CCISS_INTR_OFF);
23154+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
23155 spin_unlock_irqrestore(&h->lock, flags);
23156 free_irq(h->intr[PERF_MODE_INT], h);
23157 rc = cciss_request_irq(h, cciss_msix_discard_completions,
23158@@ -5074,9 +5076,9 @@ reinit_after_soft_reset:
23159 dev_info(&h->pdev->dev, "Board READY.\n");
23160 dev_info(&h->pdev->dev,
23161 "Waiting for stale completions to drain.\n");
23162- h->access.set_intr_mask(h, CCISS_INTR_ON);
23163+ h->access->set_intr_mask(h, CCISS_INTR_ON);
23164 msleep(10000);
23165- h->access.set_intr_mask(h, CCISS_INTR_OFF);
23166+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
23167
23168 rc = controller_reset_failed(h->cfgtable);
23169 if (rc)
23170@@ -5099,7 +5101,7 @@ reinit_after_soft_reset:
23171 cciss_scsi_setup(h);
23172
23173 /* Turn the interrupts on so we can service requests */
23174- h->access.set_intr_mask(h, CCISS_INTR_ON);
23175+ h->access->set_intr_mask(h, CCISS_INTR_ON);
23176
23177 /* Get the firmware version */
23178 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
23179@@ -5171,7 +5173,7 @@ static void cciss_shutdown(struct pci_de
23180 kfree(flush_buf);
23181 if (return_code != IO_OK)
23182 dev_warn(&h->pdev->dev, "Error flushing cache\n");
23183- h->access.set_intr_mask(h, CCISS_INTR_OFF);
23184+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
23185 free_irq(h->intr[PERF_MODE_INT], h);
23186 }
23187
23188diff -urNp linux-3.0.4/drivers/block/cciss.h linux-3.0.4/drivers/block/cciss.h
23189--- linux-3.0.4/drivers/block/cciss.h 2011-08-23 21:44:40.000000000 -0400
23190+++ linux-3.0.4/drivers/block/cciss.h 2011-08-23 21:47:55.000000000 -0400
23191@@ -100,7 +100,7 @@ struct ctlr_info
23192 /* information about each logical volume */
23193 drive_info_struct *drv[CISS_MAX_LUN];
23194
23195- struct access_method access;
23196+ struct access_method *access;
23197
23198 /* queue and queue Info */
23199 struct list_head reqQ;
23200diff -urNp linux-3.0.4/drivers/block/cpqarray.c linux-3.0.4/drivers/block/cpqarray.c
23201--- linux-3.0.4/drivers/block/cpqarray.c 2011-07-21 22:17:23.000000000 -0400
23202+++ linux-3.0.4/drivers/block/cpqarray.c 2011-08-23 21:48:14.000000000 -0400
23203@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_c
23204 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
23205 goto Enomem4;
23206 }
23207- hba[i]->access.set_intr_mask(hba[i], 0);
23208+ hba[i]->access->set_intr_mask(hba[i], 0);
23209 if (request_irq(hba[i]->intr, do_ida_intr,
23210 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
23211 {
23212@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_c
23213 add_timer(&hba[i]->timer);
23214
23215 /* Enable IRQ now that spinlock and rate limit timer are set up */
23216- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23217+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23218
23219 for(j=0; j<NWD; j++) {
23220 struct gendisk *disk = ida_gendisk[i][j];
23221@@ -694,7 +694,7 @@ DBGINFO(
23222 for(i=0; i<NR_PRODUCTS; i++) {
23223 if (board_id == products[i].board_id) {
23224 c->product_name = products[i].product_name;
23225- c->access = *(products[i].access);
23226+ c->access = products[i].access;
23227 break;
23228 }
23229 }
23230@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detec
23231 hba[ctlr]->intr = intr;
23232 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
23233 hba[ctlr]->product_name = products[j].product_name;
23234- hba[ctlr]->access = *(products[j].access);
23235+ hba[ctlr]->access = products[j].access;
23236 hba[ctlr]->ctlr = ctlr;
23237 hba[ctlr]->board_id = board_id;
23238 hba[ctlr]->pci_dev = NULL; /* not PCI */
23239@@ -911,6 +911,8 @@ static void do_ida_request(struct reques
23240 struct scatterlist tmp_sg[SG_MAX];
23241 int i, dir, seg;
23242
23243+ pax_track_stack();
23244+
23245 queue_next:
23246 creq = blk_peek_request(q);
23247 if (!creq)
23248@@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
23249
23250 while((c = h->reqQ) != NULL) {
23251 /* Can't do anything if we're busy */
23252- if (h->access.fifo_full(h) == 0)
23253+ if (h->access->fifo_full(h) == 0)
23254 return;
23255
23256 /* Get the first entry from the request Q */
23257@@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
23258 h->Qdepth--;
23259
23260 /* Tell the controller to do our bidding */
23261- h->access.submit_command(h, c);
23262+ h->access->submit_command(h, c);
23263
23264 /* Get onto the completion Q */
23265 addQ(&h->cmpQ, c);
23266@@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq,
23267 unsigned long flags;
23268 __u32 a,a1;
23269
23270- istat = h->access.intr_pending(h);
23271+ istat = h->access->intr_pending(h);
23272 /* Is this interrupt for us? */
23273 if (istat == 0)
23274 return IRQ_NONE;
23275@@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq,
23276 */
23277 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
23278 if (istat & FIFO_NOT_EMPTY) {
23279- while((a = h->access.command_completed(h))) {
23280+ while((a = h->access->command_completed(h))) {
23281 a1 = a; a &= ~3;
23282 if ((c = h->cmpQ) == NULL)
23283 {
23284@@ -1449,11 +1451,11 @@ static int sendcmd(
23285 /*
23286 * Disable interrupt
23287 */
23288- info_p->access.set_intr_mask(info_p, 0);
23289+ info_p->access->set_intr_mask(info_p, 0);
23290 /* Make sure there is room in the command FIFO */
23291 /* Actually it should be completely empty at this time. */
23292 for (i = 200000; i > 0; i--) {
23293- temp = info_p->access.fifo_full(info_p);
23294+ temp = info_p->access->fifo_full(info_p);
23295 if (temp != 0) {
23296 break;
23297 }
23298@@ -1466,7 +1468,7 @@ DBG(
23299 /*
23300 * Send the cmd
23301 */
23302- info_p->access.submit_command(info_p, c);
23303+ info_p->access->submit_command(info_p, c);
23304 complete = pollcomplete(ctlr);
23305
23306 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
23307@@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t
23308 * we check the new geometry. Then turn interrupts back on when
23309 * we're done.
23310 */
23311- host->access.set_intr_mask(host, 0);
23312+ host->access->set_intr_mask(host, 0);
23313 getgeometry(ctlr);
23314- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
23315+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
23316
23317 for(i=0; i<NWD; i++) {
23318 struct gendisk *disk = ida_gendisk[ctlr][i];
23319@@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
23320 /* Wait (up to 2 seconds) for a command to complete */
23321
23322 for (i = 200000; i > 0; i--) {
23323- done = hba[ctlr]->access.command_completed(hba[ctlr]);
23324+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
23325 if (done == 0) {
23326 udelay(10); /* a short fixed delay */
23327 } else
23328diff -urNp linux-3.0.4/drivers/block/cpqarray.h linux-3.0.4/drivers/block/cpqarray.h
23329--- linux-3.0.4/drivers/block/cpqarray.h 2011-07-21 22:17:23.000000000 -0400
23330+++ linux-3.0.4/drivers/block/cpqarray.h 2011-08-23 21:47:55.000000000 -0400
23331@@ -99,7 +99,7 @@ struct ctlr_info {
23332 drv_info_t drv[NWD];
23333 struct proc_dir_entry *proc;
23334
23335- struct access_method access;
23336+ struct access_method *access;
23337
23338 cmdlist_t *reqQ;
23339 cmdlist_t *cmpQ;
23340diff -urNp linux-3.0.4/drivers/block/DAC960.c linux-3.0.4/drivers/block/DAC960.c
23341--- linux-3.0.4/drivers/block/DAC960.c 2011-07-21 22:17:23.000000000 -0400
23342+++ linux-3.0.4/drivers/block/DAC960.c 2011-08-23 21:48:14.000000000 -0400
23343@@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfigur
23344 unsigned long flags;
23345 int Channel, TargetID;
23346
23347+ pax_track_stack();
23348+
23349 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
23350 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
23351 sizeof(DAC960_SCSI_Inquiry_T) +
23352diff -urNp linux-3.0.4/drivers/block/drbd/drbd_int.h linux-3.0.4/drivers/block/drbd/drbd_int.h
23353--- linux-3.0.4/drivers/block/drbd/drbd_int.h 2011-07-21 22:17:23.000000000 -0400
23354+++ linux-3.0.4/drivers/block/drbd/drbd_int.h 2011-08-23 21:47:55.000000000 -0400
23355@@ -737,7 +737,7 @@ struct drbd_request;
23356 struct drbd_epoch {
23357 struct list_head list;
23358 unsigned int barrier_nr;
23359- atomic_t epoch_size; /* increased on every request added. */
23360+ atomic_unchecked_t epoch_size; /* increased on every request added. */
23361 atomic_t active; /* increased on every req. added, and dec on every finished. */
23362 unsigned long flags;
23363 };
23364@@ -1109,7 +1109,7 @@ struct drbd_conf {
23365 void *int_dig_in;
23366 void *int_dig_vv;
23367 wait_queue_head_t seq_wait;
23368- atomic_t packet_seq;
23369+ atomic_unchecked_t packet_seq;
23370 unsigned int peer_seq;
23371 spinlock_t peer_seq_lock;
23372 unsigned int minor;
23373diff -urNp linux-3.0.4/drivers/block/drbd/drbd_main.c linux-3.0.4/drivers/block/drbd/drbd_main.c
23374--- linux-3.0.4/drivers/block/drbd/drbd_main.c 2011-07-21 22:17:23.000000000 -0400
23375+++ linux-3.0.4/drivers/block/drbd/drbd_main.c 2011-08-23 21:47:55.000000000 -0400
23376@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_co
23377 p.sector = sector;
23378 p.block_id = block_id;
23379 p.blksize = blksize;
23380- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
23381+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
23382
23383 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
23384 return false;
23385@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *m
23386 p.sector = cpu_to_be64(req->sector);
23387 p.block_id = (unsigned long)req;
23388 p.seq_num = cpu_to_be32(req->seq_num =
23389- atomic_add_return(1, &mdev->packet_seq));
23390+ atomic_add_return_unchecked(1, &mdev->packet_seq));
23391
23392 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
23393
23394@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_
23395 atomic_set(&mdev->unacked_cnt, 0);
23396 atomic_set(&mdev->local_cnt, 0);
23397 atomic_set(&mdev->net_cnt, 0);
23398- atomic_set(&mdev->packet_seq, 0);
23399+ atomic_set_unchecked(&mdev->packet_seq, 0);
23400 atomic_set(&mdev->pp_in_use, 0);
23401 atomic_set(&mdev->pp_in_use_by_net, 0);
23402 atomic_set(&mdev->rs_sect_in, 0);
23403@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf
23404 mdev->receiver.t_state);
23405
23406 /* no need to lock it, I'm the only thread alive */
23407- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
23408- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
23409+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
23410+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
23411 mdev->al_writ_cnt =
23412 mdev->bm_writ_cnt =
23413 mdev->read_cnt =
23414diff -urNp linux-3.0.4/drivers/block/drbd/drbd_nl.c linux-3.0.4/drivers/block/drbd/drbd_nl.c
23415--- linux-3.0.4/drivers/block/drbd/drbd_nl.c 2011-07-21 22:17:23.000000000 -0400
23416+++ linux-3.0.4/drivers/block/drbd/drbd_nl.c 2011-08-23 21:47:55.000000000 -0400
23417@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(stru
23418 module_put(THIS_MODULE);
23419 }
23420
23421-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23422+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23423
23424 static unsigned short *
23425 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
23426@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *
23427 cn_reply->id.idx = CN_IDX_DRBD;
23428 cn_reply->id.val = CN_VAL_DRBD;
23429
23430- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23431+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23432 cn_reply->ack = 0; /* not used here. */
23433 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23434 (int)((char *)tl - (char *)reply->tag_list);
23435@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_co
23436 cn_reply->id.idx = CN_IDX_DRBD;
23437 cn_reply->id.val = CN_VAL_DRBD;
23438
23439- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23440+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23441 cn_reply->ack = 0; /* not used here. */
23442 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23443 (int)((char *)tl - (char *)reply->tag_list);
23444@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mde
23445 cn_reply->id.idx = CN_IDX_DRBD;
23446 cn_reply->id.val = CN_VAL_DRBD;
23447
23448- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
23449+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
23450 cn_reply->ack = 0; // not used here.
23451 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23452 (int)((char*)tl - (char*)reply->tag_list);
23453@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drb
23454 cn_reply->id.idx = CN_IDX_DRBD;
23455 cn_reply->id.val = CN_VAL_DRBD;
23456
23457- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23458+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23459 cn_reply->ack = 0; /* not used here. */
23460 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23461 (int)((char *)tl - (char *)reply->tag_list);
23462diff -urNp linux-3.0.4/drivers/block/drbd/drbd_receiver.c linux-3.0.4/drivers/block/drbd/drbd_receiver.c
23463--- linux-3.0.4/drivers/block/drbd/drbd_receiver.c 2011-07-21 22:17:23.000000000 -0400
23464+++ linux-3.0.4/drivers/block/drbd/drbd_receiver.c 2011-08-23 21:47:55.000000000 -0400
23465@@ -894,7 +894,7 @@ retry:
23466 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
23467 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
23468
23469- atomic_set(&mdev->packet_seq, 0);
23470+ atomic_set_unchecked(&mdev->packet_seq, 0);
23471 mdev->peer_seq = 0;
23472
23473 drbd_thread_start(&mdev->asender);
23474@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish
23475 do {
23476 next_epoch = NULL;
23477
23478- epoch_size = atomic_read(&epoch->epoch_size);
23479+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
23480
23481 switch (ev & ~EV_CLEANUP) {
23482 case EV_PUT:
23483@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish
23484 rv = FE_DESTROYED;
23485 } else {
23486 epoch->flags = 0;
23487- atomic_set(&epoch->epoch_size, 0);
23488+ atomic_set_unchecked(&epoch->epoch_size, 0);
23489 /* atomic_set(&epoch->active, 0); is already zero */
23490 if (rv == FE_STILL_LIVE)
23491 rv = FE_RECYCLED;
23492@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_c
23493 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
23494 drbd_flush(mdev);
23495
23496- if (atomic_read(&mdev->current_epoch->epoch_size)) {
23497+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
23498 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
23499 if (epoch)
23500 break;
23501 }
23502
23503 epoch = mdev->current_epoch;
23504- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
23505+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
23506
23507 D_ASSERT(atomic_read(&epoch->active) == 0);
23508 D_ASSERT(epoch->flags == 0);
23509@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_c
23510 }
23511
23512 epoch->flags = 0;
23513- atomic_set(&epoch->epoch_size, 0);
23514+ atomic_set_unchecked(&epoch->epoch_size, 0);
23515 atomic_set(&epoch->active, 0);
23516
23517 spin_lock(&mdev->epoch_lock);
23518- if (atomic_read(&mdev->current_epoch->epoch_size)) {
23519+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
23520 list_add(&epoch->list, &mdev->current_epoch->list);
23521 mdev->current_epoch = epoch;
23522 mdev->epochs++;
23523@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf
23524 spin_unlock(&mdev->peer_seq_lock);
23525
23526 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
23527- atomic_inc(&mdev->current_epoch->epoch_size);
23528+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
23529 return drbd_drain_block(mdev, data_size);
23530 }
23531
23532@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf
23533
23534 spin_lock(&mdev->epoch_lock);
23535 e->epoch = mdev->current_epoch;
23536- atomic_inc(&e->epoch->epoch_size);
23537+ atomic_inc_unchecked(&e->epoch->epoch_size);
23538 atomic_inc(&e->epoch->active);
23539 spin_unlock(&mdev->epoch_lock);
23540
23541@@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_
23542 D_ASSERT(list_empty(&mdev->done_ee));
23543
23544 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
23545- atomic_set(&mdev->current_epoch->epoch_size, 0);
23546+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
23547 D_ASSERT(list_empty(&mdev->current_epoch->list));
23548 }
23549
23550diff -urNp linux-3.0.4/drivers/block/nbd.c linux-3.0.4/drivers/block/nbd.c
23551--- linux-3.0.4/drivers/block/nbd.c 2011-07-21 22:17:23.000000000 -0400
23552+++ linux-3.0.4/drivers/block/nbd.c 2011-08-23 21:48:14.000000000 -0400
23553@@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *
23554 struct kvec iov;
23555 sigset_t blocked, oldset;
23556
23557+ pax_track_stack();
23558+
23559 if (unlikely(!sock)) {
23560 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
23561 lo->disk->disk_name, (send ? "send" : "recv"));
23562@@ -572,6 +574,8 @@ static void do_nbd_request(struct reques
23563 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
23564 unsigned int cmd, unsigned long arg)
23565 {
23566+ pax_track_stack();
23567+
23568 switch (cmd) {
23569 case NBD_DISCONNECT: {
23570 struct request sreq;
23571diff -urNp linux-3.0.4/drivers/char/agp/frontend.c linux-3.0.4/drivers/char/agp/frontend.c
23572--- linux-3.0.4/drivers/char/agp/frontend.c 2011-07-21 22:17:23.000000000 -0400
23573+++ linux-3.0.4/drivers/char/agp/frontend.c 2011-08-23 21:47:55.000000000 -0400
23574@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct ag
23575 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
23576 return -EFAULT;
23577
23578- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
23579+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
23580 return -EFAULT;
23581
23582 client = agp_find_client_by_pid(reserve.pid);
23583diff -urNp linux-3.0.4/drivers/char/briq_panel.c linux-3.0.4/drivers/char/briq_panel.c
23584--- linux-3.0.4/drivers/char/briq_panel.c 2011-07-21 22:17:23.000000000 -0400
23585+++ linux-3.0.4/drivers/char/briq_panel.c 2011-08-23 21:48:14.000000000 -0400
23586@@ -9,6 +9,7 @@
23587 #include <linux/types.h>
23588 #include <linux/errno.h>
23589 #include <linux/tty.h>
23590+#include <linux/mutex.h>
23591 #include <linux/timer.h>
23592 #include <linux/kernel.h>
23593 #include <linux/wait.h>
23594@@ -34,6 +35,7 @@ static int vfd_is_open;
23595 static unsigned char vfd[40];
23596 static int vfd_cursor;
23597 static unsigned char ledpb, led;
23598+static DEFINE_MUTEX(vfd_mutex);
23599
23600 static void update_vfd(void)
23601 {
23602@@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct f
23603 if (!vfd_is_open)
23604 return -EBUSY;
23605
23606+ mutex_lock(&vfd_mutex);
23607 for (;;) {
23608 char c;
23609 if (!indx)
23610 break;
23611- if (get_user(c, buf))
23612+ if (get_user(c, buf)) {
23613+ mutex_unlock(&vfd_mutex);
23614 return -EFAULT;
23615+ }
23616 if (esc) {
23617 set_led(c);
23618 esc = 0;
23619@@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct f
23620 buf++;
23621 }
23622 update_vfd();
23623+ mutex_unlock(&vfd_mutex);
23624
23625 return len;
23626 }
23627diff -urNp linux-3.0.4/drivers/char/genrtc.c linux-3.0.4/drivers/char/genrtc.c
23628--- linux-3.0.4/drivers/char/genrtc.c 2011-07-21 22:17:23.000000000 -0400
23629+++ linux-3.0.4/drivers/char/genrtc.c 2011-08-23 21:48:14.000000000 -0400
23630@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *fi
23631 switch (cmd) {
23632
23633 case RTC_PLL_GET:
23634+ memset(&pll, 0, sizeof(pll));
23635 if (get_rtc_pll(&pll))
23636 return -EINVAL;
23637 else
23638diff -urNp linux-3.0.4/drivers/char/hpet.c linux-3.0.4/drivers/char/hpet.c
23639--- linux-3.0.4/drivers/char/hpet.c 2011-07-21 22:17:23.000000000 -0400
23640+++ linux-3.0.4/drivers/char/hpet.c 2011-08-23 21:47:55.000000000 -0400
23641@@ -572,7 +572,7 @@ static inline unsigned long hpet_time_di
23642 }
23643
23644 static int
23645-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
23646+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
23647 struct hpet_info *info)
23648 {
23649 struct hpet_timer __iomem *timer;
23650diff -urNp linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c
23651--- linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c 2011-07-21 22:17:23.000000000 -0400
23652+++ linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c 2011-08-23 21:48:14.000000000 -0400
23653@@ -415,7 +415,7 @@ struct ipmi_smi {
23654 struct proc_dir_entry *proc_dir;
23655 char proc_dir_name[10];
23656
23657- atomic_t stats[IPMI_NUM_STATS];
23658+ atomic_unchecked_t stats[IPMI_NUM_STATS];
23659
23660 /*
23661 * run_to_completion duplicate of smb_info, smi_info
23662@@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
23663
23664
23665 #define ipmi_inc_stat(intf, stat) \
23666- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
23667+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
23668 #define ipmi_get_stat(intf, stat) \
23669- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
23670+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
23671
23672 static int is_lan_addr(struct ipmi_addr *addr)
23673 {
23674@@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
23675 INIT_LIST_HEAD(&intf->cmd_rcvrs);
23676 init_waitqueue_head(&intf->waitq);
23677 for (i = 0; i < IPMI_NUM_STATS; i++)
23678- atomic_set(&intf->stats[i], 0);
23679+ atomic_set_unchecked(&intf->stats[i], 0);
23680
23681 intf->proc_dir = NULL;
23682
23683@@ -4220,6 +4220,8 @@ static void send_panic_events(char *str)
23684 struct ipmi_smi_msg smi_msg;
23685 struct ipmi_recv_msg recv_msg;
23686
23687+ pax_track_stack();
23688+
23689 si = (struct ipmi_system_interface_addr *) &addr;
23690 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
23691 si->channel = IPMI_BMC_CHANNEL;
23692diff -urNp linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c
23693--- linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c 2011-07-21 22:17:23.000000000 -0400
23694+++ linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c 2011-08-23 21:47:55.000000000 -0400
23695@@ -277,7 +277,7 @@ struct smi_info {
23696 unsigned char slave_addr;
23697
23698 /* Counters and things for the proc filesystem. */
23699- atomic_t stats[SI_NUM_STATS];
23700+ atomic_unchecked_t stats[SI_NUM_STATS];
23701
23702 struct task_struct *thread;
23703
23704@@ -286,9 +286,9 @@ struct smi_info {
23705 };
23706
23707 #define smi_inc_stat(smi, stat) \
23708- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
23709+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
23710 #define smi_get_stat(smi, stat) \
23711- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
23712+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
23713
23714 #define SI_MAX_PARMS 4
23715
23716@@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info
23717 atomic_set(&new_smi->req_events, 0);
23718 new_smi->run_to_completion = 0;
23719 for (i = 0; i < SI_NUM_STATS; i++)
23720- atomic_set(&new_smi->stats[i], 0);
23721+ atomic_set_unchecked(&new_smi->stats[i], 0);
23722
23723 new_smi->interrupt_disabled = 1;
23724 atomic_set(&new_smi->stop_operation, 0);
23725diff -urNp linux-3.0.4/drivers/char/Kconfig linux-3.0.4/drivers/char/Kconfig
23726--- linux-3.0.4/drivers/char/Kconfig 2011-07-21 22:17:23.000000000 -0400
23727+++ linux-3.0.4/drivers/char/Kconfig 2011-08-23 21:48:14.000000000 -0400
23728@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
23729
23730 config DEVKMEM
23731 bool "/dev/kmem virtual device support"
23732- default y
23733+ default n
23734+ depends on !GRKERNSEC_KMEM
23735 help
23736 Say Y here if you want to support the /dev/kmem device. The
23737 /dev/kmem device is rarely used, but can be used for certain
23738@@ -596,6 +597,7 @@ config DEVPORT
23739 bool
23740 depends on !M68K
23741 depends on ISA || PCI
23742+ depends on !GRKERNSEC_KMEM
23743 default y
23744
23745 source "drivers/s390/char/Kconfig"
23746diff -urNp linux-3.0.4/drivers/char/mem.c linux-3.0.4/drivers/char/mem.c
23747--- linux-3.0.4/drivers/char/mem.c 2011-07-21 22:17:23.000000000 -0400
23748+++ linux-3.0.4/drivers/char/mem.c 2011-08-23 21:48:14.000000000 -0400
23749@@ -18,6 +18,7 @@
23750 #include <linux/raw.h>
23751 #include <linux/tty.h>
23752 #include <linux/capability.h>
23753+#include <linux/security.h>
23754 #include <linux/ptrace.h>
23755 #include <linux/device.h>
23756 #include <linux/highmem.h>
23757@@ -34,6 +35,10 @@
23758 # include <linux/efi.h>
23759 #endif
23760
23761+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
23762+extern struct file_operations grsec_fops;
23763+#endif
23764+
23765 static inline unsigned long size_inside_page(unsigned long start,
23766 unsigned long size)
23767 {
23768@@ -65,9 +70,13 @@ static inline int range_is_allowed(unsig
23769
23770 while (cursor < to) {
23771 if (!devmem_is_allowed(pfn)) {
23772+#ifdef CONFIG_GRKERNSEC_KMEM
23773+ gr_handle_mem_readwrite(from, to);
23774+#else
23775 printk(KERN_INFO
23776 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23777 current->comm, from, to);
23778+#endif
23779 return 0;
23780 }
23781 cursor += PAGE_SIZE;
23782@@ -75,6 +84,11 @@ static inline int range_is_allowed(unsig
23783 }
23784 return 1;
23785 }
23786+#elif defined(CONFIG_GRKERNSEC_KMEM)
23787+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23788+{
23789+ return 0;
23790+}
23791 #else
23792 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23793 {
23794@@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *fil
23795
23796 while (count > 0) {
23797 unsigned long remaining;
23798+ char *temp;
23799
23800 sz = size_inside_page(p, count);
23801
23802@@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *fil
23803 if (!ptr)
23804 return -EFAULT;
23805
23806- remaining = copy_to_user(buf, ptr, sz);
23807+#ifdef CONFIG_PAX_USERCOPY
23808+ temp = kmalloc(sz, GFP_KERNEL);
23809+ if (!temp) {
23810+ unxlate_dev_mem_ptr(p, ptr);
23811+ return -ENOMEM;
23812+ }
23813+ memcpy(temp, ptr, sz);
23814+#else
23815+ temp = ptr;
23816+#endif
23817+
23818+ remaining = copy_to_user(buf, temp, sz);
23819+
23820+#ifdef CONFIG_PAX_USERCOPY
23821+ kfree(temp);
23822+#endif
23823+
23824 unxlate_dev_mem_ptr(p, ptr);
23825 if (remaining)
23826 return -EFAULT;
23827@@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *fi
23828 size_t count, loff_t *ppos)
23829 {
23830 unsigned long p = *ppos;
23831- ssize_t low_count, read, sz;
23832+ ssize_t low_count, read, sz, err = 0;
23833 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
23834- int err = 0;
23835
23836 read = 0;
23837 if (p < (unsigned long) high_memory) {
23838@@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *fi
23839 }
23840 #endif
23841 while (low_count > 0) {
23842+ char *temp;
23843+
23844 sz = size_inside_page(p, low_count);
23845
23846 /*
23847@@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *fi
23848 */
23849 kbuf = xlate_dev_kmem_ptr((char *)p);
23850
23851- if (copy_to_user(buf, kbuf, sz))
23852+#ifdef CONFIG_PAX_USERCOPY
23853+ temp = kmalloc(sz, GFP_KERNEL);
23854+ if (!temp)
23855+ return -ENOMEM;
23856+ memcpy(temp, kbuf, sz);
23857+#else
23858+ temp = kbuf;
23859+#endif
23860+
23861+ err = copy_to_user(buf, temp, sz);
23862+
23863+#ifdef CONFIG_PAX_USERCOPY
23864+ kfree(temp);
23865+#endif
23866+
23867+ if (err)
23868 return -EFAULT;
23869 buf += sz;
23870 p += sz;
23871@@ -866,6 +913,9 @@ static const struct memdev {
23872 #ifdef CONFIG_CRASH_DUMP
23873 [12] = { "oldmem", 0, &oldmem_fops, NULL },
23874 #endif
23875+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
23876+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
23877+#endif
23878 };
23879
23880 static int memory_open(struct inode *inode, struct file *filp)
23881diff -urNp linux-3.0.4/drivers/char/nvram.c linux-3.0.4/drivers/char/nvram.c
23882--- linux-3.0.4/drivers/char/nvram.c 2011-07-21 22:17:23.000000000 -0400
23883+++ linux-3.0.4/drivers/char/nvram.c 2011-08-23 21:47:55.000000000 -0400
23884@@ -246,7 +246,7 @@ static ssize_t nvram_read(struct file *f
23885
23886 spin_unlock_irq(&rtc_lock);
23887
23888- if (copy_to_user(buf, contents, tmp - contents))
23889+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
23890 return -EFAULT;
23891
23892 *ppos = i;
23893diff -urNp linux-3.0.4/drivers/char/random.c linux-3.0.4/drivers/char/random.c
23894--- linux-3.0.4/drivers/char/random.c 2011-08-23 21:44:40.000000000 -0400
23895+++ linux-3.0.4/drivers/char/random.c 2011-08-23 21:48:14.000000000 -0400
23896@@ -261,8 +261,13 @@
23897 /*
23898 * Configuration information
23899 */
23900+#ifdef CONFIG_GRKERNSEC_RANDNET
23901+#define INPUT_POOL_WORDS 512
23902+#define OUTPUT_POOL_WORDS 128
23903+#else
23904 #define INPUT_POOL_WORDS 128
23905 #define OUTPUT_POOL_WORDS 32
23906+#endif
23907 #define SEC_XFER_SIZE 512
23908 #define EXTRACT_SIZE 10
23909
23910@@ -300,10 +305,17 @@ static struct poolinfo {
23911 int poolwords;
23912 int tap1, tap2, tap3, tap4, tap5;
23913 } poolinfo_table[] = {
23914+#ifdef CONFIG_GRKERNSEC_RANDNET
23915+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
23916+ { 512, 411, 308, 208, 104, 1 },
23917+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
23918+ { 128, 103, 76, 51, 25, 1 },
23919+#else
23920 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
23921 { 128, 103, 76, 51, 25, 1 },
23922 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
23923 { 32, 26, 20, 14, 7, 1 },
23924+#endif
23925 #if 0
23926 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
23927 { 2048, 1638, 1231, 819, 411, 1 },
23928@@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(stru
23929
23930 extract_buf(r, tmp);
23931 i = min_t(int, nbytes, EXTRACT_SIZE);
23932- if (copy_to_user(buf, tmp, i)) {
23933+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
23934 ret = -EFAULT;
23935 break;
23936 }
23937@@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
23938 #include <linux/sysctl.h>
23939
23940 static int min_read_thresh = 8, min_write_thresh;
23941-static int max_read_thresh = INPUT_POOL_WORDS * 32;
23942+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
23943 static int max_write_thresh = INPUT_POOL_WORDS * 32;
23944 static char sysctl_bootid[16];
23945
23946diff -urNp linux-3.0.4/drivers/char/sonypi.c linux-3.0.4/drivers/char/sonypi.c
23947--- linux-3.0.4/drivers/char/sonypi.c 2011-07-21 22:17:23.000000000 -0400
23948+++ linux-3.0.4/drivers/char/sonypi.c 2011-08-23 21:47:55.000000000 -0400
23949@@ -55,6 +55,7 @@
23950 #include <asm/uaccess.h>
23951 #include <asm/io.h>
23952 #include <asm/system.h>
23953+#include <asm/local.h>
23954
23955 #include <linux/sonypi.h>
23956
23957@@ -491,7 +492,7 @@ static struct sonypi_device {
23958 spinlock_t fifo_lock;
23959 wait_queue_head_t fifo_proc_list;
23960 struct fasync_struct *fifo_async;
23961- int open_count;
23962+ local_t open_count;
23963 int model;
23964 struct input_dev *input_jog_dev;
23965 struct input_dev *input_key_dev;
23966@@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, st
23967 static int sonypi_misc_release(struct inode *inode, struct file *file)
23968 {
23969 mutex_lock(&sonypi_device.lock);
23970- sonypi_device.open_count--;
23971+ local_dec(&sonypi_device.open_count);
23972 mutex_unlock(&sonypi_device.lock);
23973 return 0;
23974 }
23975@@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode
23976 {
23977 mutex_lock(&sonypi_device.lock);
23978 /* Flush input queue on first open */
23979- if (!sonypi_device.open_count)
23980+ if (!local_read(&sonypi_device.open_count))
23981 kfifo_reset(&sonypi_device.fifo);
23982- sonypi_device.open_count++;
23983+ local_inc(&sonypi_device.open_count);
23984 mutex_unlock(&sonypi_device.lock);
23985
23986 return 0;
23987diff -urNp linux-3.0.4/drivers/char/tpm/tpm_bios.c linux-3.0.4/drivers/char/tpm/tpm_bios.c
23988--- linux-3.0.4/drivers/char/tpm/tpm_bios.c 2011-07-21 22:17:23.000000000 -0400
23989+++ linux-3.0.4/drivers/char/tpm/tpm_bios.c 2011-08-23 21:47:55.000000000 -0400
23990@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
23991 event = addr;
23992
23993 if ((event->event_type == 0 && event->event_size == 0) ||
23994- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
23995+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
23996 return NULL;
23997
23998 return addr;
23999@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
24000 return NULL;
24001
24002 if ((event->event_type == 0 && event->event_size == 0) ||
24003- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
24004+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
24005 return NULL;
24006
24007 (*pos)++;
24008@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
24009 int i;
24010
24011 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
24012- seq_putc(m, data[i]);
24013+ if (!seq_putc(m, data[i]))
24014+ return -EFAULT;
24015
24016 return 0;
24017 }
24018@@ -410,6 +411,11 @@ static int read_log(struct tpm_bios_log
24019 log->bios_event_log_end = log->bios_event_log + len;
24020
24021 virt = acpi_os_map_memory(start, len);
24022+ if (!virt) {
24023+ kfree(log->bios_event_log);
24024+ log->bios_event_log = NULL;
24025+ return -EFAULT;
24026+ }
24027
24028 memcpy(log->bios_event_log, virt, len);
24029
24030diff -urNp linux-3.0.4/drivers/char/tpm/tpm.c linux-3.0.4/drivers/char/tpm/tpm.c
24031--- linux-3.0.4/drivers/char/tpm/tpm.c 2011-07-21 22:17:23.000000000 -0400
24032+++ linux-3.0.4/drivers/char/tpm/tpm.c 2011-08-23 21:48:14.000000000 -0400
24033@@ -411,7 +411,7 @@ static ssize_t tpm_transmit(struct tpm_c
24034 chip->vendor.req_complete_val)
24035 goto out_recv;
24036
24037- if ((status == chip->vendor.req_canceled)) {
24038+ if (status == chip->vendor.req_canceled) {
24039 dev_err(chip->dev, "Operation Canceled\n");
24040 rc = -ECANCELED;
24041 goto out;
24042@@ -844,6 +844,8 @@ ssize_t tpm_show_pubek(struct device *de
24043
24044 struct tpm_chip *chip = dev_get_drvdata(dev);
24045
24046+ pax_track_stack();
24047+
24048 tpm_cmd.header.in = tpm_readpubek_header;
24049 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
24050 "attempting to read the PUBEK");
24051diff -urNp linux-3.0.4/drivers/crypto/hifn_795x.c linux-3.0.4/drivers/crypto/hifn_795x.c
24052--- linux-3.0.4/drivers/crypto/hifn_795x.c 2011-07-21 22:17:23.000000000 -0400
24053+++ linux-3.0.4/drivers/crypto/hifn_795x.c 2011-08-23 21:48:14.000000000 -0400
24054@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
24055 0xCA, 0x34, 0x2B, 0x2E};
24056 struct scatterlist sg;
24057
24058+ pax_track_stack();
24059+
24060 memset(src, 0, sizeof(src));
24061 memset(ctx.key, 0, sizeof(ctx.key));
24062
24063diff -urNp linux-3.0.4/drivers/crypto/padlock-aes.c linux-3.0.4/drivers/crypto/padlock-aes.c
24064--- linux-3.0.4/drivers/crypto/padlock-aes.c 2011-07-21 22:17:23.000000000 -0400
24065+++ linux-3.0.4/drivers/crypto/padlock-aes.c 2011-08-23 21:48:14.000000000 -0400
24066@@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm
24067 struct crypto_aes_ctx gen_aes;
24068 int cpu;
24069
24070+ pax_track_stack();
24071+
24072 if (key_len % 8) {
24073 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
24074 return -EINVAL;
24075diff -urNp linux-3.0.4/drivers/edac/edac_pci_sysfs.c linux-3.0.4/drivers/edac/edac_pci_sysfs.c
24076--- linux-3.0.4/drivers/edac/edac_pci_sysfs.c 2011-07-21 22:17:23.000000000 -0400
24077+++ linux-3.0.4/drivers/edac/edac_pci_sysfs.c 2011-08-23 21:47:55.000000000 -0400
24078@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log
24079 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
24080 static int edac_pci_poll_msec = 1000; /* one second workq period */
24081
24082-static atomic_t pci_parity_count = ATOMIC_INIT(0);
24083-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
24084+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
24085+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
24086
24087 static struct kobject *edac_pci_top_main_kobj;
24088 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
24089@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(str
24090 edac_printk(KERN_CRIT, EDAC_PCI,
24091 "Signaled System Error on %s\n",
24092 pci_name(dev));
24093- atomic_inc(&pci_nonparity_count);
24094+ atomic_inc_unchecked(&pci_nonparity_count);
24095 }
24096
24097 if (status & (PCI_STATUS_PARITY)) {
24098@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(str
24099 "Master Data Parity Error on %s\n",
24100 pci_name(dev));
24101
24102- atomic_inc(&pci_parity_count);
24103+ atomic_inc_unchecked(&pci_parity_count);
24104 }
24105
24106 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24107@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(str
24108 "Detected Parity Error on %s\n",
24109 pci_name(dev));
24110
24111- atomic_inc(&pci_parity_count);
24112+ atomic_inc_unchecked(&pci_parity_count);
24113 }
24114 }
24115
24116@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(str
24117 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
24118 "Signaled System Error on %s\n",
24119 pci_name(dev));
24120- atomic_inc(&pci_nonparity_count);
24121+ atomic_inc_unchecked(&pci_nonparity_count);
24122 }
24123
24124 if (status & (PCI_STATUS_PARITY)) {
24125@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(str
24126 "Master Data Parity Error on "
24127 "%s\n", pci_name(dev));
24128
24129- atomic_inc(&pci_parity_count);
24130+ atomic_inc_unchecked(&pci_parity_count);
24131 }
24132
24133 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24134@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(str
24135 "Detected Parity Error on %s\n",
24136 pci_name(dev));
24137
24138- atomic_inc(&pci_parity_count);
24139+ atomic_inc_unchecked(&pci_parity_count);
24140 }
24141 }
24142 }
24143@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
24144 if (!check_pci_errors)
24145 return;
24146
24147- before_count = atomic_read(&pci_parity_count);
24148+ before_count = atomic_read_unchecked(&pci_parity_count);
24149
24150 /* scan all PCI devices looking for a Parity Error on devices and
24151 * bridges.
24152@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
24153 /* Only if operator has selected panic on PCI Error */
24154 if (edac_pci_get_panic_on_pe()) {
24155 /* If the count is different 'after' from 'before' */
24156- if (before_count != atomic_read(&pci_parity_count))
24157+ if (before_count != atomic_read_unchecked(&pci_parity_count))
24158 panic("EDAC: PCI Parity Error");
24159 }
24160 }
24161diff -urNp linux-3.0.4/drivers/edac/mce_amd.h linux-3.0.4/drivers/edac/mce_amd.h
24162--- linux-3.0.4/drivers/edac/mce_amd.h 2011-07-21 22:17:23.000000000 -0400
24163+++ linux-3.0.4/drivers/edac/mce_amd.h 2011-08-23 21:47:55.000000000 -0400
24164@@ -83,7 +83,7 @@ struct amd_decoder_ops {
24165 bool (*dc_mce)(u16, u8);
24166 bool (*ic_mce)(u16, u8);
24167 bool (*nb_mce)(u16, u8);
24168-};
24169+} __no_const;
24170
24171 void amd_report_gart_errors(bool);
24172 void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
24173diff -urNp linux-3.0.4/drivers/firewire/core-card.c linux-3.0.4/drivers/firewire/core-card.c
24174--- linux-3.0.4/drivers/firewire/core-card.c 2011-07-21 22:17:23.000000000 -0400
24175+++ linux-3.0.4/drivers/firewire/core-card.c 2011-08-23 21:47:55.000000000 -0400
24176@@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
24177
24178 void fw_core_remove_card(struct fw_card *card)
24179 {
24180- struct fw_card_driver dummy_driver = dummy_driver_template;
24181+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
24182
24183 card->driver->update_phy_reg(card, 4,
24184 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
24185diff -urNp linux-3.0.4/drivers/firewire/core-cdev.c linux-3.0.4/drivers/firewire/core-cdev.c
24186--- linux-3.0.4/drivers/firewire/core-cdev.c 2011-08-23 21:44:40.000000000 -0400
24187+++ linux-3.0.4/drivers/firewire/core-cdev.c 2011-08-23 21:47:55.000000000 -0400
24188@@ -1313,8 +1313,7 @@ static int init_iso_resource(struct clie
24189 int ret;
24190
24191 if ((request->channels == 0 && request->bandwidth == 0) ||
24192- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
24193- request->bandwidth < 0)
24194+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
24195 return -EINVAL;
24196
24197 r = kmalloc(sizeof(*r), GFP_KERNEL);
24198diff -urNp linux-3.0.4/drivers/firewire/core.h linux-3.0.4/drivers/firewire/core.h
24199--- linux-3.0.4/drivers/firewire/core.h 2011-07-21 22:17:23.000000000 -0400
24200+++ linux-3.0.4/drivers/firewire/core.h 2011-08-23 21:47:55.000000000 -0400
24201@@ -101,6 +101,7 @@ struct fw_card_driver {
24202
24203 int (*stop_iso)(struct fw_iso_context *ctx);
24204 };
24205+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
24206
24207 void fw_card_initialize(struct fw_card *card,
24208 const struct fw_card_driver *driver, struct device *device);
24209diff -urNp linux-3.0.4/drivers/firewire/core-transaction.c linux-3.0.4/drivers/firewire/core-transaction.c
24210--- linux-3.0.4/drivers/firewire/core-transaction.c 2011-07-21 22:17:23.000000000 -0400
24211+++ linux-3.0.4/drivers/firewire/core-transaction.c 2011-08-23 21:48:14.000000000 -0400
24212@@ -37,6 +37,7 @@
24213 #include <linux/timer.h>
24214 #include <linux/types.h>
24215 #include <linux/workqueue.h>
24216+#include <linux/sched.h>
24217
24218 #include <asm/byteorder.h>
24219
24220@@ -422,6 +423,8 @@ int fw_run_transaction(struct fw_card *c
24221 struct transaction_callback_data d;
24222 struct fw_transaction t;
24223
24224+ pax_track_stack();
24225+
24226 init_timer_on_stack(&t.split_timeout_timer);
24227 init_completion(&d.done);
24228 d.payload = payload;
24229diff -urNp linux-3.0.4/drivers/firmware/dmi_scan.c linux-3.0.4/drivers/firmware/dmi_scan.c
24230--- linux-3.0.4/drivers/firmware/dmi_scan.c 2011-07-21 22:17:23.000000000 -0400
24231+++ linux-3.0.4/drivers/firmware/dmi_scan.c 2011-08-23 21:47:55.000000000 -0400
24232@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
24233 }
24234 }
24235 else {
24236- /*
24237- * no iounmap() for that ioremap(); it would be a no-op, but
24238- * it's so early in setup that sucker gets confused into doing
24239- * what it shouldn't if we actually call it.
24240- */
24241 p = dmi_ioremap(0xF0000, 0x10000);
24242 if (p == NULL)
24243 goto error;
24244diff -urNp linux-3.0.4/drivers/gpio/vr41xx_giu.c linux-3.0.4/drivers/gpio/vr41xx_giu.c
24245--- linux-3.0.4/drivers/gpio/vr41xx_giu.c 2011-07-21 22:17:23.000000000 -0400
24246+++ linux-3.0.4/drivers/gpio/vr41xx_giu.c 2011-08-23 21:47:55.000000000 -0400
24247@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
24248 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
24249 maskl, pendl, maskh, pendh);
24250
24251- atomic_inc(&irq_err_count);
24252+ atomic_inc_unchecked(&irq_err_count);
24253
24254 return -EINVAL;
24255 }
24256diff -urNp linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c
24257--- linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c 2011-07-21 22:17:23.000000000 -0400
24258+++ linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c 2011-08-23 21:48:14.000000000 -0400
24259@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct d
24260 struct drm_crtc *tmp;
24261 int crtc_mask = 1;
24262
24263- WARN(!crtc, "checking null crtc?\n");
24264+ BUG_ON(!crtc);
24265
24266 dev = crtc->dev;
24267
24268@@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm
24269 struct drm_encoder *encoder;
24270 bool ret = true;
24271
24272+ pax_track_stack();
24273+
24274 crtc->enabled = drm_helper_crtc_in_use(crtc);
24275 if (!crtc->enabled)
24276 return true;
24277diff -urNp linux-3.0.4/drivers/gpu/drm/drm_drv.c linux-3.0.4/drivers/gpu/drm/drm_drv.c
24278--- linux-3.0.4/drivers/gpu/drm/drm_drv.c 2011-07-21 22:17:23.000000000 -0400
24279+++ linux-3.0.4/drivers/gpu/drm/drm_drv.c 2011-08-23 21:47:55.000000000 -0400
24280@@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
24281
24282 dev = file_priv->minor->dev;
24283 atomic_inc(&dev->ioctl_count);
24284- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
24285+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
24286 ++file_priv->ioctl_count;
24287
24288 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
24289diff -urNp linux-3.0.4/drivers/gpu/drm/drm_fops.c linux-3.0.4/drivers/gpu/drm/drm_fops.c
24290--- linux-3.0.4/drivers/gpu/drm/drm_fops.c 2011-07-21 22:17:23.000000000 -0400
24291+++ linux-3.0.4/drivers/gpu/drm/drm_fops.c 2011-08-23 21:47:55.000000000 -0400
24292@@ -70,7 +70,7 @@ static int drm_setup(struct drm_device *
24293 }
24294
24295 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
24296- atomic_set(&dev->counts[i], 0);
24297+ atomic_set_unchecked(&dev->counts[i], 0);
24298
24299 dev->sigdata.lock = NULL;
24300
24301@@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct
24302
24303 retcode = drm_open_helper(inode, filp, dev);
24304 if (!retcode) {
24305- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
24306- if (!dev->open_count++)
24307+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
24308+ if (local_inc_return(&dev->open_count) == 1)
24309 retcode = drm_setup(dev);
24310 }
24311 if (!retcode) {
24312@@ -472,7 +472,7 @@ int drm_release(struct inode *inode, str
24313
24314 mutex_lock(&drm_global_mutex);
24315
24316- DRM_DEBUG("open_count = %d\n", dev->open_count);
24317+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
24318
24319 if (dev->driver->preclose)
24320 dev->driver->preclose(dev, file_priv);
24321@@ -484,7 +484,7 @@ int drm_release(struct inode *inode, str
24322 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
24323 task_pid_nr(current),
24324 (long)old_encode_dev(file_priv->minor->device),
24325- dev->open_count);
24326+ local_read(&dev->open_count));
24327
24328 /* if the master has gone away we can't do anything with the lock */
24329 if (file_priv->minor->master)
24330@@ -565,8 +565,8 @@ int drm_release(struct inode *inode, str
24331 * End inline drm_release
24332 */
24333
24334- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
24335- if (!--dev->open_count) {
24336+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
24337+ if (local_dec_and_test(&dev->open_count)) {
24338 if (atomic_read(&dev->ioctl_count)) {
24339 DRM_ERROR("Device busy: %d\n",
24340 atomic_read(&dev->ioctl_count));
24341diff -urNp linux-3.0.4/drivers/gpu/drm/drm_global.c linux-3.0.4/drivers/gpu/drm/drm_global.c
24342--- linux-3.0.4/drivers/gpu/drm/drm_global.c 2011-07-21 22:17:23.000000000 -0400
24343+++ linux-3.0.4/drivers/gpu/drm/drm_global.c 2011-08-23 21:47:55.000000000 -0400
24344@@ -36,7 +36,7 @@
24345 struct drm_global_item {
24346 struct mutex mutex;
24347 void *object;
24348- int refcount;
24349+ atomic_t refcount;
24350 };
24351
24352 static struct drm_global_item glob[DRM_GLOBAL_NUM];
24353@@ -49,7 +49,7 @@ void drm_global_init(void)
24354 struct drm_global_item *item = &glob[i];
24355 mutex_init(&item->mutex);
24356 item->object = NULL;
24357- item->refcount = 0;
24358+ atomic_set(&item->refcount, 0);
24359 }
24360 }
24361
24362@@ -59,7 +59,7 @@ void drm_global_release(void)
24363 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
24364 struct drm_global_item *item = &glob[i];
24365 BUG_ON(item->object != NULL);
24366- BUG_ON(item->refcount != 0);
24367+ BUG_ON(atomic_read(&item->refcount) != 0);
24368 }
24369 }
24370
24371@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_globa
24372 void *object;
24373
24374 mutex_lock(&item->mutex);
24375- if (item->refcount == 0) {
24376+ if (atomic_read(&item->refcount) == 0) {
24377 item->object = kzalloc(ref->size, GFP_KERNEL);
24378 if (unlikely(item->object == NULL)) {
24379 ret = -ENOMEM;
24380@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_globa
24381 goto out_err;
24382
24383 }
24384- ++item->refcount;
24385+ atomic_inc(&item->refcount);
24386 ref->object = item->object;
24387 object = item->object;
24388 mutex_unlock(&item->mutex);
24389@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_gl
24390 struct drm_global_item *item = &glob[ref->global_type];
24391
24392 mutex_lock(&item->mutex);
24393- BUG_ON(item->refcount == 0);
24394+ BUG_ON(atomic_read(&item->refcount) == 0);
24395 BUG_ON(ref->object != item->object);
24396- if (--item->refcount == 0) {
24397+ if (atomic_dec_and_test(&item->refcount)) {
24398 ref->release(ref);
24399 item->object = NULL;
24400 }
24401diff -urNp linux-3.0.4/drivers/gpu/drm/drm_info.c linux-3.0.4/drivers/gpu/drm/drm_info.c
24402--- linux-3.0.4/drivers/gpu/drm/drm_info.c 2011-07-21 22:17:23.000000000 -0400
24403+++ linux-3.0.4/drivers/gpu/drm/drm_info.c 2011-08-23 21:48:14.000000000 -0400
24404@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
24405 struct drm_local_map *map;
24406 struct drm_map_list *r_list;
24407
24408- /* Hardcoded from _DRM_FRAME_BUFFER,
24409- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
24410- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
24411- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
24412+ static const char * const types[] = {
24413+ [_DRM_FRAME_BUFFER] = "FB",
24414+ [_DRM_REGISTERS] = "REG",
24415+ [_DRM_SHM] = "SHM",
24416+ [_DRM_AGP] = "AGP",
24417+ [_DRM_SCATTER_GATHER] = "SG",
24418+ [_DRM_CONSISTENT] = "PCI",
24419+ [_DRM_GEM] = "GEM" };
24420 const char *type;
24421 int i;
24422
24423@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
24424 map = r_list->map;
24425 if (!map)
24426 continue;
24427- if (map->type < 0 || map->type > 5)
24428+ if (map->type >= ARRAY_SIZE(types))
24429 type = "??";
24430 else
24431 type = types[map->type];
24432@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, voi
24433 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
24434 vma->vm_flags & VM_LOCKED ? 'l' : '-',
24435 vma->vm_flags & VM_IO ? 'i' : '-',
24436+#ifdef CONFIG_GRKERNSEC_HIDESYM
24437+ 0);
24438+#else
24439 vma->vm_pgoff);
24440+#endif
24441
24442 #if defined(__i386__)
24443 pgprot = pgprot_val(vma->vm_page_prot);
24444diff -urNp linux-3.0.4/drivers/gpu/drm/drm_ioctl.c linux-3.0.4/drivers/gpu/drm/drm_ioctl.c
24445--- linux-3.0.4/drivers/gpu/drm/drm_ioctl.c 2011-07-21 22:17:23.000000000 -0400
24446+++ linux-3.0.4/drivers/gpu/drm/drm_ioctl.c 2011-08-23 21:47:55.000000000 -0400
24447@@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev,
24448 stats->data[i].value =
24449 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
24450 else
24451- stats->data[i].value = atomic_read(&dev->counts[i]);
24452+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
24453 stats->data[i].type = dev->types[i];
24454 }
24455
24456diff -urNp linux-3.0.4/drivers/gpu/drm/drm_lock.c linux-3.0.4/drivers/gpu/drm/drm_lock.c
24457--- linux-3.0.4/drivers/gpu/drm/drm_lock.c 2011-07-21 22:17:23.000000000 -0400
24458+++ linux-3.0.4/drivers/gpu/drm/drm_lock.c 2011-08-23 21:47:55.000000000 -0400
24459@@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, voi
24460 if (drm_lock_take(&master->lock, lock->context)) {
24461 master->lock.file_priv = file_priv;
24462 master->lock.lock_time = jiffies;
24463- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
24464+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
24465 break; /* Got lock */
24466 }
24467
24468@@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, v
24469 return -EINVAL;
24470 }
24471
24472- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
24473+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
24474
24475 if (drm_lock_free(&master->lock, lock->context)) {
24476 /* FIXME: Should really bail out here. */
24477diff -urNp linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c
24478--- linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c 2011-07-21 22:17:23.000000000 -0400
24479+++ linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c 2011-08-23 21:47:55.000000000 -0400
24480@@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_de
24481 dma->buflist[vertex->idx],
24482 vertex->discard, vertex->used);
24483
24484- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24485- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24486+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24487+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24488 sarea_priv->last_enqueue = dev_priv->counter - 1;
24489 sarea_priv->last_dispatch = (int)hw_status[5];
24490
24491@@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device
24492 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
24493 mc->last_render);
24494
24495- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24496- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24497+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24498+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24499 sarea_priv->last_enqueue = dev_priv->counter - 1;
24500 sarea_priv->last_dispatch = (int)hw_status[5];
24501
24502diff -urNp linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h
24503--- linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h 2011-07-21 22:17:23.000000000 -0400
24504+++ linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h 2011-08-23 21:47:55.000000000 -0400
24505@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
24506 int page_flipping;
24507
24508 wait_queue_head_t irq_queue;
24509- atomic_t irq_received;
24510- atomic_t irq_emitted;
24511+ atomic_unchecked_t irq_received;
24512+ atomic_unchecked_t irq_emitted;
24513
24514 int front_offset;
24515 } drm_i810_private_t;
24516diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c
24517--- linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c 2011-07-21 22:17:23.000000000 -0400
24518+++ linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c 2011-08-23 21:47:55.000000000 -0400
24519@@ -497,7 +497,7 @@ static int i915_interrupt_info(struct se
24520 I915_READ(GTIMR));
24521 }
24522 seq_printf(m, "Interrupts received: %d\n",
24523- atomic_read(&dev_priv->irq_received));
24524+ atomic_read_unchecked(&dev_priv->irq_received));
24525 for (i = 0; i < I915_NUM_RINGS; i++) {
24526 if (IS_GEN6(dev)) {
24527 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
24528diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c
24529--- linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c 2011-08-23 21:44:40.000000000 -0400
24530+++ linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c 2011-08-23 21:47:55.000000000 -0400
24531@@ -1169,7 +1169,7 @@ static bool i915_switcheroo_can_switch(s
24532 bool can_switch;
24533
24534 spin_lock(&dev->count_lock);
24535- can_switch = (dev->open_count == 0);
24536+ can_switch = (local_read(&dev->open_count) == 0);
24537 spin_unlock(&dev->count_lock);
24538 return can_switch;
24539 }
24540diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h
24541--- linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h 2011-07-21 22:17:23.000000000 -0400
24542+++ linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h 2011-08-23 21:47:55.000000000 -0400
24543@@ -219,7 +219,7 @@ struct drm_i915_display_funcs {
24544 /* render clock increase/decrease */
24545 /* display clock increase/decrease */
24546 /* pll clock increase/decrease */
24547-};
24548+} __no_const;
24549
24550 struct intel_device_info {
24551 u8 gen;
24552@@ -300,7 +300,7 @@ typedef struct drm_i915_private {
24553 int current_page;
24554 int page_flipping;
24555
24556- atomic_t irq_received;
24557+ atomic_unchecked_t irq_received;
24558
24559 /* protects the irq masks */
24560 spinlock_t irq_lock;
24561@@ -874,7 +874,7 @@ struct drm_i915_gem_object {
24562 * will be page flipped away on the next vblank. When it
24563 * reaches 0, dev_priv->pending_flip_queue will be woken up.
24564 */
24565- atomic_t pending_flip;
24566+ atomic_unchecked_t pending_flip;
24567 };
24568
24569 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
24570@@ -1247,7 +1247,7 @@ extern int intel_setup_gmbus(struct drm_
24571 extern void intel_teardown_gmbus(struct drm_device *dev);
24572 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
24573 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
24574-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
24575+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
24576 {
24577 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
24578 }
24579diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c
24580--- linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-07-21 22:17:23.000000000 -0400
24581+++ linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-08-23 21:47:55.000000000 -0400
24582@@ -188,7 +188,7 @@ i915_gem_object_set_to_gpu_domain(struct
24583 i915_gem_clflush_object(obj);
24584
24585 if (obj->base.pending_write_domain)
24586- cd->flips |= atomic_read(&obj->pending_flip);
24587+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
24588
24589 /* The actual obj->write_domain will be updated with
24590 * pending_write_domain after we emit the accumulated flush for all
24591diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c
24592--- linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c 2011-08-23 21:44:40.000000000 -0400
24593+++ linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c 2011-08-23 21:47:55.000000000 -0400
24594@@ -473,7 +473,7 @@ static irqreturn_t ivybridge_irq_handler
24595 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
24596 struct drm_i915_master_private *master_priv;
24597
24598- atomic_inc(&dev_priv->irq_received);
24599+ atomic_inc_unchecked(&dev_priv->irq_received);
24600
24601 /* disable master interrupt before clearing iir */
24602 de_ier = I915_READ(DEIER);
24603@@ -563,7 +563,7 @@ static irqreturn_t ironlake_irq_handler(
24604 struct drm_i915_master_private *master_priv;
24605 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
24606
24607- atomic_inc(&dev_priv->irq_received);
24608+ atomic_inc_unchecked(&dev_priv->irq_received);
24609
24610 if (IS_GEN6(dev))
24611 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
24612@@ -1226,7 +1226,7 @@ static irqreturn_t i915_driver_irq_handl
24613 int ret = IRQ_NONE, pipe;
24614 bool blc_event = false;
24615
24616- atomic_inc(&dev_priv->irq_received);
24617+ atomic_inc_unchecked(&dev_priv->irq_received);
24618
24619 iir = I915_READ(IIR);
24620
24621@@ -1735,7 +1735,7 @@ static void ironlake_irq_preinstall(stru
24622 {
24623 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
24624
24625- atomic_set(&dev_priv->irq_received, 0);
24626+ atomic_set_unchecked(&dev_priv->irq_received, 0);
24627
24628 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
24629 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
24630@@ -1899,7 +1899,7 @@ static void i915_driver_irq_preinstall(s
24631 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
24632 int pipe;
24633
24634- atomic_set(&dev_priv->irq_received, 0);
24635+ atomic_set_unchecked(&dev_priv->irq_received, 0);
24636
24637 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
24638 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
24639diff -urNp linux-3.0.4/drivers/gpu/drm/i915/intel_display.c linux-3.0.4/drivers/gpu/drm/i915/intel_display.c
24640--- linux-3.0.4/drivers/gpu/drm/i915/intel_display.c 2011-08-23 21:44:40.000000000 -0400
24641+++ linux-3.0.4/drivers/gpu/drm/i915/intel_display.c 2011-08-23 21:47:55.000000000 -0400
24642@@ -1961,7 +1961,7 @@ intel_pipe_set_base(struct drm_crtc *crt
24643
24644 wait_event(dev_priv->pending_flip_queue,
24645 atomic_read(&dev_priv->mm.wedged) ||
24646- atomic_read(&obj->pending_flip) == 0);
24647+ atomic_read_unchecked(&obj->pending_flip) == 0);
24648
24649 /* Big Hammer, we also need to ensure that any pending
24650 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
24651@@ -2548,7 +2548,7 @@ static void intel_crtc_wait_for_pending_
24652 obj = to_intel_framebuffer(crtc->fb)->obj;
24653 dev_priv = crtc->dev->dev_private;
24654 wait_event(dev_priv->pending_flip_queue,
24655- atomic_read(&obj->pending_flip) == 0);
24656+ atomic_read_unchecked(&obj->pending_flip) == 0);
24657 }
24658
24659 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
24660@@ -6225,7 +6225,7 @@ static void do_intel_finish_page_flip(st
24661
24662 atomic_clear_mask(1 << intel_crtc->plane,
24663 &obj->pending_flip.counter);
24664- if (atomic_read(&obj->pending_flip) == 0)
24665+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
24666 wake_up(&dev_priv->pending_flip_queue);
24667
24668 schedule_work(&work->work);
24669@@ -6514,7 +6514,7 @@ static int intel_crtc_page_flip(struct d
24670 /* Block clients from rendering to the new back buffer until
24671 * the flip occurs and the object is no longer visible.
24672 */
24673- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24674+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24675
24676 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
24677 if (ret)
24678@@ -6527,7 +6527,7 @@ static int intel_crtc_page_flip(struct d
24679 return 0;
24680
24681 cleanup_pending:
24682- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24683+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24684 cleanup_objs:
24685 drm_gem_object_unreference(&work->old_fb_obj->base);
24686 drm_gem_object_unreference(&obj->base);
24687diff -urNp linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h
24688--- linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h 2011-07-21 22:17:23.000000000 -0400
24689+++ linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h 2011-08-23 21:47:55.000000000 -0400
24690@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
24691 u32 clear_cmd;
24692 u32 maccess;
24693
24694- atomic_t vbl_received; /**< Number of vblanks received. */
24695+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
24696 wait_queue_head_t fence_queue;
24697- atomic_t last_fence_retired;
24698+ atomic_unchecked_t last_fence_retired;
24699 u32 next_fence_to_post;
24700
24701 unsigned int fb_cpp;
24702diff -urNp linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c
24703--- linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c 2011-07-21 22:17:23.000000000 -0400
24704+++ linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c 2011-08-23 21:47:55.000000000 -0400
24705@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
24706 if (crtc != 0)
24707 return 0;
24708
24709- return atomic_read(&dev_priv->vbl_received);
24710+ return atomic_read_unchecked(&dev_priv->vbl_received);
24711 }
24712
24713
24714@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
24715 /* VBLANK interrupt */
24716 if (status & MGA_VLINEPEN) {
24717 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
24718- atomic_inc(&dev_priv->vbl_received);
24719+ atomic_inc_unchecked(&dev_priv->vbl_received);
24720 drm_handle_vblank(dev, 0);
24721 handled = 1;
24722 }
24723@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
24724 if ((prim_start & ~0x03) != (prim_end & ~0x03))
24725 MGA_WRITE(MGA_PRIMEND, prim_end);
24726
24727- atomic_inc(&dev_priv->last_fence_retired);
24728+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
24729 DRM_WAKEUP(&dev_priv->fence_queue);
24730 handled = 1;
24731 }
24732@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_dev
24733 * using fences.
24734 */
24735 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
24736- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
24737+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
24738 - *sequence) <= (1 << 23)));
24739
24740 *sequence = cur_fence;
24741diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c
24742--- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-07-21 22:17:23.000000000 -0400
24743+++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-08-26 19:49:56.000000000 -0400
24744@@ -200,7 +200,7 @@ struct methods {
24745 const char desc[8];
24746 void (*loadbios)(struct drm_device *, uint8_t *);
24747 const bool rw;
24748-};
24749+} __do_const;
24750
24751 static struct methods shadow_methods[] = {
24752 { "PRAMIN", load_vbios_pramin, true },
24753@@ -5488,7 +5488,7 @@ parse_bit_displayport_tbl_entry(struct d
24754 struct bit_table {
24755 const char id;
24756 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
24757-};
24758+} __no_const;
24759
24760 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
24761
24762diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h
24763--- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-07-21 22:17:23.000000000 -0400
24764+++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-08-23 21:47:55.000000000 -0400
24765@@ -227,7 +227,7 @@ struct nouveau_channel {
24766 struct list_head pending;
24767 uint32_t sequence;
24768 uint32_t sequence_ack;
24769- atomic_t last_sequence_irq;
24770+ atomic_unchecked_t last_sequence_irq;
24771 } fence;
24772
24773 /* DMA push buffer */
24774@@ -304,7 +304,7 @@ struct nouveau_exec_engine {
24775 u32 handle, u16 class);
24776 void (*set_tile_region)(struct drm_device *dev, int i);
24777 void (*tlb_flush)(struct drm_device *, int engine);
24778-};
24779+} __no_const;
24780
24781 struct nouveau_instmem_engine {
24782 void *priv;
24783@@ -325,13 +325,13 @@ struct nouveau_instmem_engine {
24784 struct nouveau_mc_engine {
24785 int (*init)(struct drm_device *dev);
24786 void (*takedown)(struct drm_device *dev);
24787-};
24788+} __no_const;
24789
24790 struct nouveau_timer_engine {
24791 int (*init)(struct drm_device *dev);
24792 void (*takedown)(struct drm_device *dev);
24793 uint64_t (*read)(struct drm_device *dev);
24794-};
24795+} __no_const;
24796
24797 struct nouveau_fb_engine {
24798 int num_tiles;
24799@@ -494,7 +494,7 @@ struct nouveau_vram_engine {
24800 void (*put)(struct drm_device *, struct nouveau_mem **);
24801
24802 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
24803-};
24804+} __no_const;
24805
24806 struct nouveau_engine {
24807 struct nouveau_instmem_engine instmem;
24808@@ -640,7 +640,7 @@ struct drm_nouveau_private {
24809 struct drm_global_reference mem_global_ref;
24810 struct ttm_bo_global_ref bo_global_ref;
24811 struct ttm_bo_device bdev;
24812- atomic_t validate_sequence;
24813+ atomic_unchecked_t validate_sequence;
24814 } ttm;
24815
24816 struct {
24817diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c
24818--- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-07-21 22:17:23.000000000 -0400
24819+++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-08-23 21:47:55.000000000 -0400
24820@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_chan
24821 if (USE_REFCNT(dev))
24822 sequence = nvchan_rd32(chan, 0x48);
24823 else
24824- sequence = atomic_read(&chan->fence.last_sequence_irq);
24825+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
24826
24827 if (chan->fence.sequence_ack == sequence)
24828 goto out;
24829@@ -544,7 +544,7 @@ nouveau_fence_channel_init(struct nouvea
24830
24831 INIT_LIST_HEAD(&chan->fence.pending);
24832 spin_lock_init(&chan->fence.lock);
24833- atomic_set(&chan->fence.last_sequence_irq, 0);
24834+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
24835 return 0;
24836 }
24837
24838diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c
24839--- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-07-21 22:17:23.000000000 -0400
24840+++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-08-23 21:47:55.000000000 -0400
24841@@ -249,7 +249,7 @@ validate_init(struct nouveau_channel *ch
24842 int trycnt = 0;
24843 int ret, i;
24844
24845- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
24846+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
24847 retry:
24848 if (++trycnt > 100000) {
24849 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
24850diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c
24851--- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c 2011-07-21 22:17:23.000000000 -0400
24852+++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c 2011-08-23 21:47:55.000000000 -0400
24853@@ -488,7 +488,7 @@ static bool nouveau_switcheroo_can_switc
24854 bool can_switch;
24855
24856 spin_lock(&dev->count_lock);
24857- can_switch = (dev->open_count == 0);
24858+ can_switch = (local_read(&dev->open_count) == 0);
24859 spin_unlock(&dev->count_lock);
24860 return can_switch;
24861 }
24862diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c
24863--- linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c 2011-07-21 22:17:23.000000000 -0400
24864+++ linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c 2011-08-23 21:47:55.000000000 -0400
24865@@ -560,7 +560,7 @@ static int
24866 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
24867 u32 class, u32 mthd, u32 data)
24868 {
24869- atomic_set(&chan->fence.last_sequence_irq, data);
24870+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
24871 return 0;
24872 }
24873
24874diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c
24875--- linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c 2011-07-21 22:17:23.000000000 -0400
24876+++ linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c 2011-08-23 21:47:55.000000000 -0400
24877@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
24878
24879 /* GH: Simple idle check.
24880 */
24881- atomic_set(&dev_priv->idle_count, 0);
24882+ atomic_set_unchecked(&dev_priv->idle_count, 0);
24883
24884 /* We don't support anything other than bus-mastering ring mode,
24885 * but the ring can be in either AGP or PCI space for the ring
24886diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h
24887--- linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h 2011-07-21 22:17:23.000000000 -0400
24888+++ linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h 2011-08-23 21:47:55.000000000 -0400
24889@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
24890 int is_pci;
24891 unsigned long cce_buffers_offset;
24892
24893- atomic_t idle_count;
24894+ atomic_unchecked_t idle_count;
24895
24896 int page_flipping;
24897 int current_page;
24898 u32 crtc_offset;
24899 u32 crtc_offset_cntl;
24900
24901- atomic_t vbl_received;
24902+ atomic_unchecked_t vbl_received;
24903
24904 u32 color_fmt;
24905 unsigned int front_offset;
24906diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c
24907--- linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c 2011-07-21 22:17:23.000000000 -0400
24908+++ linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c 2011-08-23 21:47:55.000000000 -0400
24909@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
24910 if (crtc != 0)
24911 return 0;
24912
24913- return atomic_read(&dev_priv->vbl_received);
24914+ return atomic_read_unchecked(&dev_priv->vbl_received);
24915 }
24916
24917 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
24918@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
24919 /* VBLANK interrupt */
24920 if (status & R128_CRTC_VBLANK_INT) {
24921 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
24922- atomic_inc(&dev_priv->vbl_received);
24923+ atomic_inc_unchecked(&dev_priv->vbl_received);
24924 drm_handle_vblank(dev, 0);
24925 return IRQ_HANDLED;
24926 }
24927diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_state.c linux-3.0.4/drivers/gpu/drm/r128/r128_state.c
24928--- linux-3.0.4/drivers/gpu/drm/r128/r128_state.c 2011-07-21 22:17:23.000000000 -0400
24929+++ linux-3.0.4/drivers/gpu/drm/r128/r128_state.c 2011-08-23 21:47:55.000000000 -0400
24930@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_priv
24931
24932 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
24933 {
24934- if (atomic_read(&dev_priv->idle_count) == 0)
24935+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
24936 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
24937 else
24938- atomic_set(&dev_priv->idle_count, 0);
24939+ atomic_set_unchecked(&dev_priv->idle_count, 0);
24940 }
24941
24942 #endif
24943diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/atom.c linux-3.0.4/drivers/gpu/drm/radeon/atom.c
24944--- linux-3.0.4/drivers/gpu/drm/radeon/atom.c 2011-07-21 22:17:23.000000000 -0400
24945+++ linux-3.0.4/drivers/gpu/drm/radeon/atom.c 2011-08-23 21:48:14.000000000 -0400
24946@@ -1245,6 +1245,8 @@ struct atom_context *atom_parse(struct c
24947 char name[512];
24948 int i;
24949
24950+ pax_track_stack();
24951+
24952 ctx->card = card;
24953 ctx->bios = bios;
24954
24955diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c
24956--- linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c 2011-07-21 22:17:23.000000000 -0400
24957+++ linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c 2011-08-23 21:47:55.000000000 -0400
24958@@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
24959 regex_t mask_rex;
24960 regmatch_t match[4];
24961 char buf[1024];
24962- size_t end;
24963+ long end;
24964 int len;
24965 int done = 0;
24966 int r;
24967 unsigned o;
24968 struct offset *offset;
24969 char last_reg_s[10];
24970- int last_reg;
24971+ unsigned long last_reg;
24972
24973 if (regcomp
24974 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
24975diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c
24976--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c 2011-07-21 22:17:23.000000000 -0400
24977+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c 2011-08-23 21:48:14.000000000 -0400
24978@@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from
24979 struct radeon_gpio_rec gpio;
24980 struct radeon_hpd hpd;
24981
24982+ pax_track_stack();
24983+
24984 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
24985 return false;
24986
24987diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c
24988--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c 2011-08-23 21:44:40.000000000 -0400
24989+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c 2011-08-23 21:47:55.000000000 -0400
24990@@ -678,7 +678,7 @@ static bool radeon_switcheroo_can_switch
24991 bool can_switch;
24992
24993 spin_lock(&dev->count_lock);
24994- can_switch = (dev->open_count == 0);
24995+ can_switch = (local_read(&dev->open_count) == 0);
24996 spin_unlock(&dev->count_lock);
24997 return can_switch;
24998 }
24999diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c
25000--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c 2011-08-23 21:44:40.000000000 -0400
25001+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c 2011-08-23 21:48:14.000000000 -0400
25002@@ -946,6 +946,8 @@ void radeon_compute_pll_legacy(struct ra
25003 uint32_t post_div;
25004 u32 pll_out_min, pll_out_max;
25005
25006+ pax_track_stack();
25007+
25008 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
25009 freq = freq * 1000;
25010
25011diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h
25012--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h 2011-07-21 22:17:23.000000000 -0400
25013+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h 2011-08-23 21:47:55.000000000 -0400
25014@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
25015
25016 /* SW interrupt */
25017 wait_queue_head_t swi_queue;
25018- atomic_t swi_emitted;
25019+ atomic_unchecked_t swi_emitted;
25020 int vblank_crtc;
25021 uint32_t irq_enable_reg;
25022 uint32_t r500_disp_irq_reg;
25023diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c
25024--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c 2011-07-21 22:17:23.000000000 -0400
25025+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c 2011-08-23 21:47:55.000000000 -0400
25026@@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_devi
25027 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
25028 return 0;
25029 }
25030- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
25031+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
25032 if (!rdev->cp.ready)
25033 /* FIXME: cp is not running assume everythings is done right
25034 * away
25035@@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct rade
25036 return r;
25037 }
25038 radeon_fence_write(rdev, 0);
25039- atomic_set(&rdev->fence_drv.seq, 0);
25040+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
25041 INIT_LIST_HEAD(&rdev->fence_drv.created);
25042 INIT_LIST_HEAD(&rdev->fence_drv.emited);
25043 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
25044diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon.h linux-3.0.4/drivers/gpu/drm/radeon/radeon.h
25045--- linux-3.0.4/drivers/gpu/drm/radeon/radeon.h 2011-07-21 22:17:23.000000000 -0400
25046+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon.h 2011-08-23 21:47:55.000000000 -0400
25047@@ -191,7 +191,7 @@ extern int sumo_get_temp(struct radeon_d
25048 */
25049 struct radeon_fence_driver {
25050 uint32_t scratch_reg;
25051- atomic_t seq;
25052+ atomic_unchecked_t seq;
25053 uint32_t last_seq;
25054 unsigned long last_jiffies;
25055 unsigned long last_timeout;
25056@@ -960,7 +960,7 @@ struct radeon_asic {
25057 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
25058 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
25059 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
25060-};
25061+} __no_const;
25062
25063 /*
25064 * Asic structures
25065diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c
25066--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-07-21 22:17:23.000000000 -0400
25067+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-08-23 21:47:55.000000000 -0400
25068@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(str
25069 request = compat_alloc_user_space(sizeof(*request));
25070 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
25071 || __put_user(req32.param, &request->param)
25072- || __put_user((void __user *)(unsigned long)req32.value,
25073+ || __put_user((unsigned long)req32.value,
25074 &request->value))
25075 return -EFAULT;
25076
25077diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c
25078--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c 2011-07-21 22:17:23.000000000 -0400
25079+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c 2011-08-23 21:47:55.000000000 -0400
25080@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
25081 unsigned int ret;
25082 RING_LOCALS;
25083
25084- atomic_inc(&dev_priv->swi_emitted);
25085- ret = atomic_read(&dev_priv->swi_emitted);
25086+ atomic_inc_unchecked(&dev_priv->swi_emitted);
25087+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
25088
25089 BEGIN_RING(4);
25090 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
25091@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
25092 drm_radeon_private_t *dev_priv =
25093 (drm_radeon_private_t *) dev->dev_private;
25094
25095- atomic_set(&dev_priv->swi_emitted, 0);
25096+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
25097 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
25098
25099 dev->max_vblank_count = 0x001fffff;
25100diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c
25101--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c 2011-07-21 22:17:23.000000000 -0400
25102+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c 2011-08-23 21:47:55.000000000 -0400
25103@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
25104 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
25105 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
25106
25107- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25108+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25109 sarea_priv->nbox * sizeof(depth_boxes[0])))
25110 return -EFAULT;
25111
25112@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
25113 {
25114 drm_radeon_private_t *dev_priv = dev->dev_private;
25115 drm_radeon_getparam_t *param = data;
25116- int value;
25117+ int value = 0;
25118
25119 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
25120
25121diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c
25122--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c 2011-07-21 22:17:23.000000000 -0400
25123+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c 2011-08-23 21:47:55.000000000 -0400
25124@@ -644,8 +644,10 @@ int radeon_mmap(struct file *filp, struc
25125 }
25126 if (unlikely(ttm_vm_ops == NULL)) {
25127 ttm_vm_ops = vma->vm_ops;
25128- radeon_ttm_vm_ops = *ttm_vm_ops;
25129- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25130+ pax_open_kernel();
25131+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
25132+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25133+ pax_close_kernel();
25134 }
25135 vma->vm_ops = &radeon_ttm_vm_ops;
25136 return 0;
25137diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/rs690.c linux-3.0.4/drivers/gpu/drm/radeon/rs690.c
25138--- linux-3.0.4/drivers/gpu/drm/radeon/rs690.c 2011-07-21 22:17:23.000000000 -0400
25139+++ linux-3.0.4/drivers/gpu/drm/radeon/rs690.c 2011-08-23 21:47:55.000000000 -0400
25140@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct
25141 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
25142 rdev->pm.sideport_bandwidth.full)
25143 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
25144- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
25145+ read_delay_latency.full = dfixed_const(800 * 1000);
25146 read_delay_latency.full = dfixed_div(read_delay_latency,
25147 rdev->pm.igp_sideport_mclk);
25148+ a.full = dfixed_const(370);
25149+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
25150 } else {
25151 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
25152 rdev->pm.k8_bandwidth.full)
25153diff -urNp linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c
25154--- linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-07-21 22:17:23.000000000 -0400
25155+++ linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-08-23 21:47:55.000000000 -0400
25156@@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages
25157 static int ttm_pool_mm_shrink(struct shrinker *shrink,
25158 struct shrink_control *sc)
25159 {
25160- static atomic_t start_pool = ATOMIC_INIT(0);
25161+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
25162 unsigned i;
25163- unsigned pool_offset = atomic_add_return(1, &start_pool);
25164+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
25165 struct ttm_page_pool *pool;
25166 int shrink_pages = sc->nr_to_scan;
25167
25168diff -urNp linux-3.0.4/drivers/gpu/drm/via/via_drv.h linux-3.0.4/drivers/gpu/drm/via/via_drv.h
25169--- linux-3.0.4/drivers/gpu/drm/via/via_drv.h 2011-07-21 22:17:23.000000000 -0400
25170+++ linux-3.0.4/drivers/gpu/drm/via/via_drv.h 2011-08-23 21:47:55.000000000 -0400
25171@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
25172 typedef uint32_t maskarray_t[5];
25173
25174 typedef struct drm_via_irq {
25175- atomic_t irq_received;
25176+ atomic_unchecked_t irq_received;
25177 uint32_t pending_mask;
25178 uint32_t enable_mask;
25179 wait_queue_head_t irq_queue;
25180@@ -75,7 +75,7 @@ typedef struct drm_via_private {
25181 struct timeval last_vblank;
25182 int last_vblank_valid;
25183 unsigned usec_per_vblank;
25184- atomic_t vbl_received;
25185+ atomic_unchecked_t vbl_received;
25186 drm_via_state_t hc_state;
25187 char pci_buf[VIA_PCI_BUF_SIZE];
25188 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
25189diff -urNp linux-3.0.4/drivers/gpu/drm/via/via_irq.c linux-3.0.4/drivers/gpu/drm/via/via_irq.c
25190--- linux-3.0.4/drivers/gpu/drm/via/via_irq.c 2011-07-21 22:17:23.000000000 -0400
25191+++ linux-3.0.4/drivers/gpu/drm/via/via_irq.c 2011-08-23 21:47:55.000000000 -0400
25192@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
25193 if (crtc != 0)
25194 return 0;
25195
25196- return atomic_read(&dev_priv->vbl_received);
25197+ return atomic_read_unchecked(&dev_priv->vbl_received);
25198 }
25199
25200 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
25201@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
25202
25203 status = VIA_READ(VIA_REG_INTERRUPT);
25204 if (status & VIA_IRQ_VBLANK_PENDING) {
25205- atomic_inc(&dev_priv->vbl_received);
25206- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
25207+ atomic_inc_unchecked(&dev_priv->vbl_received);
25208+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
25209 do_gettimeofday(&cur_vblank);
25210 if (dev_priv->last_vblank_valid) {
25211 dev_priv->usec_per_vblank =
25212@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25213 dev_priv->last_vblank = cur_vblank;
25214 dev_priv->last_vblank_valid = 1;
25215 }
25216- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
25217+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
25218 DRM_DEBUG("US per vblank is: %u\n",
25219 dev_priv->usec_per_vblank);
25220 }
25221@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25222
25223 for (i = 0; i < dev_priv->num_irqs; ++i) {
25224 if (status & cur_irq->pending_mask) {
25225- atomic_inc(&cur_irq->irq_received);
25226+ atomic_inc_unchecked(&cur_irq->irq_received);
25227 DRM_WAKEUP(&cur_irq->irq_queue);
25228 handled = 1;
25229 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
25230@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *d
25231 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25232 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
25233 masks[irq][4]));
25234- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
25235+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
25236 } else {
25237 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25238 (((cur_irq_sequence =
25239- atomic_read(&cur_irq->irq_received)) -
25240+ atomic_read_unchecked(&cur_irq->irq_received)) -
25241 *sequence) <= (1 << 23)));
25242 }
25243 *sequence = cur_irq_sequence;
25244@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct dr
25245 }
25246
25247 for (i = 0; i < dev_priv->num_irqs; ++i) {
25248- atomic_set(&cur_irq->irq_received, 0);
25249+ atomic_set_unchecked(&cur_irq->irq_received, 0);
25250 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
25251 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
25252 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
25253@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev,
25254 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
25255 case VIA_IRQ_RELATIVE:
25256 irqwait->request.sequence +=
25257- atomic_read(&cur_irq->irq_received);
25258+ atomic_read_unchecked(&cur_irq->irq_received);
25259 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
25260 case VIA_IRQ_ABSOLUTE:
25261 break;
25262diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
25263--- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-07-21 22:17:23.000000000 -0400
25264+++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-08-23 21:47:55.000000000 -0400
25265@@ -240,7 +240,7 @@ struct vmw_private {
25266 * Fencing and IRQs.
25267 */
25268
25269- atomic_t fence_seq;
25270+ atomic_unchecked_t fence_seq;
25271 wait_queue_head_t fence_queue;
25272 wait_queue_head_t fifo_queue;
25273 atomic_t fence_queue_waiters;
25274diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
25275--- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-07-21 22:17:23.000000000 -0400
25276+++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-08-23 21:47:55.000000000 -0400
25277@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev
25278 while (!vmw_lag_lt(queue, us)) {
25279 spin_lock(&queue->lock);
25280 if (list_empty(&queue->head))
25281- sequence = atomic_read(&dev_priv->fence_seq);
25282+ sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25283 else {
25284 fence = list_first_entry(&queue->head,
25285 struct vmw_fence, head);
25286diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
25287--- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-07-21 22:17:23.000000000 -0400
25288+++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-08-23 21:47:55.000000000 -0400
25289@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de
25290 (unsigned int) min,
25291 (unsigned int) fifo->capabilities);
25292
25293- atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25294+ atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25295 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
25296 vmw_fence_queue_init(&fifo->fence_queue);
25297 return vmw_fifo_send_fence(dev_priv, &dummy);
25298@@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25299
25300 fm = vmw_fifo_reserve(dev_priv, bytes);
25301 if (unlikely(fm == NULL)) {
25302- *sequence = atomic_read(&dev_priv->fence_seq);
25303+ *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25304 ret = -ENOMEM;
25305 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
25306 false, 3*HZ);
25307@@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25308 }
25309
25310 do {
25311- *sequence = atomic_add_return(1, &dev_priv->fence_seq);
25312+ *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
25313 } while (*sequence == 0);
25314
25315 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
25316diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
25317--- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-07-21 22:17:23.000000000 -0400
25318+++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-08-23 21:47:55.000000000 -0400
25319@@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_priva
25320 * emitted. Then the fence is stale and signaled.
25321 */
25322
25323- ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
25324+ ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
25325 > VMW_FENCE_WRAP);
25326
25327 return ret;
25328@@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private
25329
25330 if (fifo_idle)
25331 down_read(&fifo_state->rwsem);
25332- signal_seq = atomic_read(&dev_priv->fence_seq);
25333+ signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
25334 ret = 0;
25335
25336 for (;;) {
25337diff -urNp linux-3.0.4/drivers/hid/hid-core.c linux-3.0.4/drivers/hid/hid-core.c
25338--- linux-3.0.4/drivers/hid/hid-core.c 2011-07-21 22:17:23.000000000 -0400
25339+++ linux-3.0.4/drivers/hid/hid-core.c 2011-08-23 21:47:55.000000000 -0400
25340@@ -1923,7 +1923,7 @@ static bool hid_ignore(struct hid_device
25341
25342 int hid_add_device(struct hid_device *hdev)
25343 {
25344- static atomic_t id = ATOMIC_INIT(0);
25345+ static atomic_unchecked_t id = ATOMIC_INIT(0);
25346 int ret;
25347
25348 if (WARN_ON(hdev->status & HID_STAT_ADDED))
25349@@ -1938,7 +1938,7 @@ int hid_add_device(struct hid_device *hd
25350 /* XXX hack, any other cleaner solution after the driver core
25351 * is converted to allow more than 20 bytes as the device name? */
25352 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
25353- hdev->vendor, hdev->product, atomic_inc_return(&id));
25354+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
25355
25356 hid_debug_register(hdev, dev_name(&hdev->dev));
25357 ret = device_add(&hdev->dev);
25358diff -urNp linux-3.0.4/drivers/hid/usbhid/hiddev.c linux-3.0.4/drivers/hid/usbhid/hiddev.c
25359--- linux-3.0.4/drivers/hid/usbhid/hiddev.c 2011-07-21 22:17:23.000000000 -0400
25360+++ linux-3.0.4/drivers/hid/usbhid/hiddev.c 2011-08-23 21:47:55.000000000 -0400
25361@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *fi
25362 break;
25363
25364 case HIDIOCAPPLICATION:
25365- if (arg < 0 || arg >= hid->maxapplication)
25366+ if (arg >= hid->maxapplication)
25367 break;
25368
25369 for (i = 0; i < hid->maxcollection; i++)
25370diff -urNp linux-3.0.4/drivers/hwmon/acpi_power_meter.c linux-3.0.4/drivers/hwmon/acpi_power_meter.c
25371--- linux-3.0.4/drivers/hwmon/acpi_power_meter.c 2011-07-21 22:17:23.000000000 -0400
25372+++ linux-3.0.4/drivers/hwmon/acpi_power_meter.c 2011-08-23 21:47:55.000000000 -0400
25373@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
25374 return res;
25375
25376 temp /= 1000;
25377- if (temp < 0)
25378- return -EINVAL;
25379
25380 mutex_lock(&resource->lock);
25381 resource->trip[attr->index - 7] = temp;
25382diff -urNp linux-3.0.4/drivers/hwmon/sht15.c linux-3.0.4/drivers/hwmon/sht15.c
25383--- linux-3.0.4/drivers/hwmon/sht15.c 2011-07-21 22:17:23.000000000 -0400
25384+++ linux-3.0.4/drivers/hwmon/sht15.c 2011-08-23 21:47:55.000000000 -0400
25385@@ -166,7 +166,7 @@ struct sht15_data {
25386 int supply_uV;
25387 bool supply_uV_valid;
25388 struct work_struct update_supply_work;
25389- atomic_t interrupt_handled;
25390+ atomic_unchecked_t interrupt_handled;
25391 };
25392
25393 /**
25394@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht1
25395 return ret;
25396
25397 gpio_direction_input(data->pdata->gpio_data);
25398- atomic_set(&data->interrupt_handled, 0);
25399+ atomic_set_unchecked(&data->interrupt_handled, 0);
25400
25401 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25402 if (gpio_get_value(data->pdata->gpio_data) == 0) {
25403 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
25404 /* Only relevant if the interrupt hasn't occurred. */
25405- if (!atomic_read(&data->interrupt_handled))
25406+ if (!atomic_read_unchecked(&data->interrupt_handled))
25407 schedule_work(&data->read_work);
25408 }
25409 ret = wait_event_timeout(data->wait_queue,
25410@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired
25411
25412 /* First disable the interrupt */
25413 disable_irq_nosync(irq);
25414- atomic_inc(&data->interrupt_handled);
25415+ atomic_inc_unchecked(&data->interrupt_handled);
25416 /* Then schedule a reading work struct */
25417 if (data->state != SHT15_READING_NOTHING)
25418 schedule_work(&data->read_work);
25419@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct wo
25420 * If not, then start the interrupt again - care here as could
25421 * have gone low in meantime so verify it hasn't!
25422 */
25423- atomic_set(&data->interrupt_handled, 0);
25424+ atomic_set_unchecked(&data->interrupt_handled, 0);
25425 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25426 /* If still not occurred or another handler has been scheduled */
25427 if (gpio_get_value(data->pdata->gpio_data)
25428- || atomic_read(&data->interrupt_handled))
25429+ || atomic_read_unchecked(&data->interrupt_handled))
25430 return;
25431 }
25432
25433diff -urNp linux-3.0.4/drivers/hwmon/w83791d.c linux-3.0.4/drivers/hwmon/w83791d.c
25434--- linux-3.0.4/drivers/hwmon/w83791d.c 2011-07-21 22:17:23.000000000 -0400
25435+++ linux-3.0.4/drivers/hwmon/w83791d.c 2011-08-23 21:47:55.000000000 -0400
25436@@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_cli
25437 struct i2c_board_info *info);
25438 static int w83791d_remove(struct i2c_client *client);
25439
25440-static int w83791d_read(struct i2c_client *client, u8 register);
25441-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
25442+static int w83791d_read(struct i2c_client *client, u8 reg);
25443+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
25444 static struct w83791d_data *w83791d_update_device(struct device *dev);
25445
25446 #ifdef DEBUG
25447diff -urNp linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c
25448--- linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c 2011-07-21 22:17:23.000000000 -0400
25449+++ linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-23 21:47:55.000000000 -0400
25450@@ -43,7 +43,7 @@
25451 extern struct i2c_adapter amd756_smbus;
25452
25453 static struct i2c_adapter *s4882_adapter;
25454-static struct i2c_algorithm *s4882_algo;
25455+static i2c_algorithm_no_const *s4882_algo;
25456
25457 /* Wrapper access functions for multiplexed SMBus */
25458 static DEFINE_MUTEX(amd756_lock);
25459diff -urNp linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c
25460--- linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-07-21 22:17:23.000000000 -0400
25461+++ linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-23 21:47:55.000000000 -0400
25462@@ -41,7 +41,7 @@
25463 extern struct i2c_adapter *nforce2_smbus;
25464
25465 static struct i2c_adapter *s4985_adapter;
25466-static struct i2c_algorithm *s4985_algo;
25467+static i2c_algorithm_no_const *s4985_algo;
25468
25469 /* Wrapper access functions for multiplexed SMBus */
25470 static DEFINE_MUTEX(nforce2_lock);
25471diff -urNp linux-3.0.4/drivers/i2c/i2c-mux.c linux-3.0.4/drivers/i2c/i2c-mux.c
25472--- linux-3.0.4/drivers/i2c/i2c-mux.c 2011-07-21 22:17:23.000000000 -0400
25473+++ linux-3.0.4/drivers/i2c/i2c-mux.c 2011-08-23 21:47:55.000000000 -0400
25474@@ -28,7 +28,7 @@
25475 /* multiplexer per channel data */
25476 struct i2c_mux_priv {
25477 struct i2c_adapter adap;
25478- struct i2c_algorithm algo;
25479+ i2c_algorithm_no_const algo;
25480
25481 struct i2c_adapter *parent;
25482 void *mux_dev; /* the mux chip/device */
25483diff -urNp linux-3.0.4/drivers/ide/ide-cd.c linux-3.0.4/drivers/ide/ide-cd.c
25484--- linux-3.0.4/drivers/ide/ide-cd.c 2011-07-21 22:17:23.000000000 -0400
25485+++ linux-3.0.4/drivers/ide/ide-cd.c 2011-08-23 21:47:55.000000000 -0400
25486@@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_
25487 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
25488 if ((unsigned long)buf & alignment
25489 || blk_rq_bytes(rq) & q->dma_pad_mask
25490- || object_is_on_stack(buf))
25491+ || object_starts_on_stack(buf))
25492 drive->dma = 0;
25493 }
25494 }
25495diff -urNp linux-3.0.4/drivers/ide/ide-floppy.c linux-3.0.4/drivers/ide/ide-floppy.c
25496--- linux-3.0.4/drivers/ide/ide-floppy.c 2011-07-21 22:17:23.000000000 -0400
25497+++ linux-3.0.4/drivers/ide/ide-floppy.c 2011-08-23 21:48:14.000000000 -0400
25498@@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_d
25499 u8 pc_buf[256], header_len, desc_cnt;
25500 int i, rc = 1, blocks, length;
25501
25502+ pax_track_stack();
25503+
25504 ide_debug_log(IDE_DBG_FUNC, "enter");
25505
25506 drive->bios_cyl = 0;
25507diff -urNp linux-3.0.4/drivers/ide/setup-pci.c linux-3.0.4/drivers/ide/setup-pci.c
25508--- linux-3.0.4/drivers/ide/setup-pci.c 2011-07-21 22:17:23.000000000 -0400
25509+++ linux-3.0.4/drivers/ide/setup-pci.c 2011-08-23 21:48:14.000000000 -0400
25510@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
25511 int ret, i, n_ports = dev2 ? 4 : 2;
25512 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
25513
25514+ pax_track_stack();
25515+
25516 for (i = 0; i < n_ports / 2; i++) {
25517 ret = ide_setup_pci_controller(pdev[i], d, !i);
25518 if (ret < 0)
25519diff -urNp linux-3.0.4/drivers/infiniband/core/cm.c linux-3.0.4/drivers/infiniband/core/cm.c
25520--- linux-3.0.4/drivers/infiniband/core/cm.c 2011-07-21 22:17:23.000000000 -0400
25521+++ linux-3.0.4/drivers/infiniband/core/cm.c 2011-08-23 21:47:55.000000000 -0400
25522@@ -113,7 +113,7 @@ static char const counter_group_names[CM
25523
25524 struct cm_counter_group {
25525 struct kobject obj;
25526- atomic_long_t counter[CM_ATTR_COUNT];
25527+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
25528 };
25529
25530 struct cm_counter_attribute {
25531@@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm
25532 struct ib_mad_send_buf *msg = NULL;
25533 int ret;
25534
25535- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25536+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25537 counter[CM_REQ_COUNTER]);
25538
25539 /* Quick state check to discard duplicate REQs. */
25540@@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm
25541 if (!cm_id_priv)
25542 return;
25543
25544- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25545+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25546 counter[CM_REP_COUNTER]);
25547 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
25548 if (ret)
25549@@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work
25550 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
25551 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
25552 spin_unlock_irq(&cm_id_priv->lock);
25553- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25554+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25555 counter[CM_RTU_COUNTER]);
25556 goto out;
25557 }
25558@@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_wor
25559 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
25560 dreq_msg->local_comm_id);
25561 if (!cm_id_priv) {
25562- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25563+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25564 counter[CM_DREQ_COUNTER]);
25565 cm_issue_drep(work->port, work->mad_recv_wc);
25566 return -EINVAL;
25567@@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_wor
25568 case IB_CM_MRA_REP_RCVD:
25569 break;
25570 case IB_CM_TIMEWAIT:
25571- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25572+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25573 counter[CM_DREQ_COUNTER]);
25574 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25575 goto unlock;
25576@@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_wor
25577 cm_free_msg(msg);
25578 goto deref;
25579 case IB_CM_DREQ_RCVD:
25580- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25581+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25582 counter[CM_DREQ_COUNTER]);
25583 goto unlock;
25584 default:
25585@@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work
25586 ib_modify_mad(cm_id_priv->av.port->mad_agent,
25587 cm_id_priv->msg, timeout)) {
25588 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
25589- atomic_long_inc(&work->port->
25590+ atomic_long_inc_unchecked(&work->port->
25591 counter_group[CM_RECV_DUPLICATES].
25592 counter[CM_MRA_COUNTER]);
25593 goto out;
25594@@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work
25595 break;
25596 case IB_CM_MRA_REQ_RCVD:
25597 case IB_CM_MRA_REP_RCVD:
25598- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25599+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25600 counter[CM_MRA_COUNTER]);
25601 /* fall through */
25602 default:
25603@@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work
25604 case IB_CM_LAP_IDLE:
25605 break;
25606 case IB_CM_MRA_LAP_SENT:
25607- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25608+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25609 counter[CM_LAP_COUNTER]);
25610 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25611 goto unlock;
25612@@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work
25613 cm_free_msg(msg);
25614 goto deref;
25615 case IB_CM_LAP_RCVD:
25616- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25617+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25618 counter[CM_LAP_COUNTER]);
25619 goto unlock;
25620 default:
25621@@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm
25622 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
25623 if (cur_cm_id_priv) {
25624 spin_unlock_irq(&cm.lock);
25625- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25626+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25627 counter[CM_SIDR_REQ_COUNTER]);
25628 goto out; /* Duplicate message. */
25629 }
25630@@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_ma
25631 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
25632 msg->retries = 1;
25633
25634- atomic_long_add(1 + msg->retries,
25635+ atomic_long_add_unchecked(1 + msg->retries,
25636 &port->counter_group[CM_XMIT].counter[attr_index]);
25637 if (msg->retries)
25638- atomic_long_add(msg->retries,
25639+ atomic_long_add_unchecked(msg->retries,
25640 &port->counter_group[CM_XMIT_RETRIES].
25641 counter[attr_index]);
25642
25643@@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_ma
25644 }
25645
25646 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
25647- atomic_long_inc(&port->counter_group[CM_RECV].
25648+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
25649 counter[attr_id - CM_ATTR_ID_OFFSET]);
25650
25651 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
25652@@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct ko
25653 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
25654
25655 return sprintf(buf, "%ld\n",
25656- atomic_long_read(&group->counter[cm_attr->index]));
25657+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
25658 }
25659
25660 static const struct sysfs_ops cm_counter_ops = {
25661diff -urNp linux-3.0.4/drivers/infiniband/core/fmr_pool.c linux-3.0.4/drivers/infiniband/core/fmr_pool.c
25662--- linux-3.0.4/drivers/infiniband/core/fmr_pool.c 2011-07-21 22:17:23.000000000 -0400
25663+++ linux-3.0.4/drivers/infiniband/core/fmr_pool.c 2011-08-23 21:47:55.000000000 -0400
25664@@ -97,8 +97,8 @@ struct ib_fmr_pool {
25665
25666 struct task_struct *thread;
25667
25668- atomic_t req_ser;
25669- atomic_t flush_ser;
25670+ atomic_unchecked_t req_ser;
25671+ atomic_unchecked_t flush_ser;
25672
25673 wait_queue_head_t force_wait;
25674 };
25675@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
25676 struct ib_fmr_pool *pool = pool_ptr;
25677
25678 do {
25679- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
25680+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
25681 ib_fmr_batch_release(pool);
25682
25683- atomic_inc(&pool->flush_ser);
25684+ atomic_inc_unchecked(&pool->flush_ser);
25685 wake_up_interruptible(&pool->force_wait);
25686
25687 if (pool->flush_function)
25688@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
25689 }
25690
25691 set_current_state(TASK_INTERRUPTIBLE);
25692- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
25693+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
25694 !kthread_should_stop())
25695 schedule();
25696 __set_current_state(TASK_RUNNING);
25697@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
25698 pool->dirty_watermark = params->dirty_watermark;
25699 pool->dirty_len = 0;
25700 spin_lock_init(&pool->pool_lock);
25701- atomic_set(&pool->req_ser, 0);
25702- atomic_set(&pool->flush_ser, 0);
25703+ atomic_set_unchecked(&pool->req_ser, 0);
25704+ atomic_set_unchecked(&pool->flush_ser, 0);
25705 init_waitqueue_head(&pool->force_wait);
25706
25707 pool->thread = kthread_run(ib_fmr_cleanup_thread,
25708@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
25709 }
25710 spin_unlock_irq(&pool->pool_lock);
25711
25712- serial = atomic_inc_return(&pool->req_ser);
25713+ serial = atomic_inc_return_unchecked(&pool->req_ser);
25714 wake_up_process(pool->thread);
25715
25716 if (wait_event_interruptible(pool->force_wait,
25717- atomic_read(&pool->flush_ser) - serial >= 0))
25718+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
25719 return -EINTR;
25720
25721 return 0;
25722@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
25723 } else {
25724 list_add_tail(&fmr->list, &pool->dirty_list);
25725 if (++pool->dirty_len >= pool->dirty_watermark) {
25726- atomic_inc(&pool->req_ser);
25727+ atomic_inc_unchecked(&pool->req_ser);
25728 wake_up_process(pool->thread);
25729 }
25730 }
25731diff -urNp linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c
25732--- linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c 2011-07-21 22:17:23.000000000 -0400
25733+++ linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c 2011-08-23 21:47:55.000000000 -0400
25734@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_r
25735 int err;
25736 struct fw_ri_tpte tpt;
25737 u32 stag_idx;
25738- static atomic_t key;
25739+ static atomic_unchecked_t key;
25740
25741 if (c4iw_fatal_error(rdev))
25742 return -EIO;
25743@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_r
25744 &rdev->resource.tpt_fifo_lock);
25745 if (!stag_idx)
25746 return -ENOMEM;
25747- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
25748+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
25749 }
25750 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
25751 __func__, stag_state, type, pdid, stag_idx);
25752diff -urNp linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c
25753--- linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c 2011-07-21 22:17:23.000000000 -0400
25754+++ linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c 2011-08-23 21:48:14.000000000 -0400
25755@@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(stru
25756 struct infinipath_counters counters;
25757 struct ipath_devdata *dd;
25758
25759+ pax_track_stack();
25760+
25761 dd = file->f_path.dentry->d_inode->i_private;
25762 dd->ipath_f_read_counters(dd, &counters);
25763
25764diff -urNp linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c
25765--- linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c 2011-07-21 22:17:23.000000000 -0400
25766+++ linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c 2011-08-23 21:47:55.000000000 -0400
25767@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
25768 struct ib_atomic_eth *ateth;
25769 struct ipath_ack_entry *e;
25770 u64 vaddr;
25771- atomic64_t *maddr;
25772+ atomic64_unchecked_t *maddr;
25773 u64 sdata;
25774 u32 rkey;
25775 u8 next;
25776@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de
25777 IB_ACCESS_REMOTE_ATOMIC)))
25778 goto nack_acc_unlck;
25779 /* Perform atomic OP and save result. */
25780- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
25781+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
25782 sdata = be64_to_cpu(ateth->swap_data);
25783 e = &qp->s_ack_queue[qp->r_head_ack_queue];
25784 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
25785- (u64) atomic64_add_return(sdata, maddr) - sdata :
25786+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
25787 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
25788 be64_to_cpu(ateth->compare_data),
25789 sdata);
25790diff -urNp linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c
25791--- linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-07-21 22:17:23.000000000 -0400
25792+++ linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-08-23 21:47:55.000000000 -0400
25793@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip
25794 unsigned long flags;
25795 struct ib_wc wc;
25796 u64 sdata;
25797- atomic64_t *maddr;
25798+ atomic64_unchecked_t *maddr;
25799 enum ib_wc_status send_status;
25800
25801 /*
25802@@ -382,11 +382,11 @@ again:
25803 IB_ACCESS_REMOTE_ATOMIC)))
25804 goto acc_err;
25805 /* Perform atomic OP and save result. */
25806- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
25807+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
25808 sdata = wqe->wr.wr.atomic.compare_add;
25809 *(u64 *) sqp->s_sge.sge.vaddr =
25810 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
25811- (u64) atomic64_add_return(sdata, maddr) - sdata :
25812+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
25813 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
25814 sdata, wqe->wr.wr.atomic.swap);
25815 goto send_comp;
25816diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes.c linux-3.0.4/drivers/infiniband/hw/nes/nes.c
25817--- linux-3.0.4/drivers/infiniband/hw/nes/nes.c 2011-07-21 22:17:23.000000000 -0400
25818+++ linux-3.0.4/drivers/infiniband/hw/nes/nes.c 2011-08-23 21:47:55.000000000 -0400
25819@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
25820 LIST_HEAD(nes_adapter_list);
25821 static LIST_HEAD(nes_dev_list);
25822
25823-atomic_t qps_destroyed;
25824+atomic_unchecked_t qps_destroyed;
25825
25826 static unsigned int ee_flsh_adapter;
25827 static unsigned int sysfs_nonidx_addr;
25828@@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(str
25829 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
25830 struct nes_adapter *nesadapter = nesdev->nesadapter;
25831
25832- atomic_inc(&qps_destroyed);
25833+ atomic_inc_unchecked(&qps_destroyed);
25834
25835 /* Free the control structures */
25836
25837diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c
25838--- linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c 2011-07-21 22:17:23.000000000 -0400
25839+++ linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c 2011-08-23 21:47:55.000000000 -0400
25840@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
25841 u32 cm_packets_retrans;
25842 u32 cm_packets_created;
25843 u32 cm_packets_received;
25844-atomic_t cm_listens_created;
25845-atomic_t cm_listens_destroyed;
25846+atomic_unchecked_t cm_listens_created;
25847+atomic_unchecked_t cm_listens_destroyed;
25848 u32 cm_backlog_drops;
25849-atomic_t cm_loopbacks;
25850-atomic_t cm_nodes_created;
25851-atomic_t cm_nodes_destroyed;
25852-atomic_t cm_accel_dropped_pkts;
25853-atomic_t cm_resets_recvd;
25854+atomic_unchecked_t cm_loopbacks;
25855+atomic_unchecked_t cm_nodes_created;
25856+atomic_unchecked_t cm_nodes_destroyed;
25857+atomic_unchecked_t cm_accel_dropped_pkts;
25858+atomic_unchecked_t cm_resets_recvd;
25859
25860 static inline int mini_cm_accelerated(struct nes_cm_core *,
25861 struct nes_cm_node *);
25862@@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
25863
25864 static struct nes_cm_core *g_cm_core;
25865
25866-atomic_t cm_connects;
25867-atomic_t cm_accepts;
25868-atomic_t cm_disconnects;
25869-atomic_t cm_closes;
25870-atomic_t cm_connecteds;
25871-atomic_t cm_connect_reqs;
25872-atomic_t cm_rejects;
25873+atomic_unchecked_t cm_connects;
25874+atomic_unchecked_t cm_accepts;
25875+atomic_unchecked_t cm_disconnects;
25876+atomic_unchecked_t cm_closes;
25877+atomic_unchecked_t cm_connecteds;
25878+atomic_unchecked_t cm_connect_reqs;
25879+atomic_unchecked_t cm_rejects;
25880
25881
25882 /**
25883@@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(str
25884 kfree(listener);
25885 listener = NULL;
25886 ret = 0;
25887- atomic_inc(&cm_listens_destroyed);
25888+ atomic_inc_unchecked(&cm_listens_destroyed);
25889 } else {
25890 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
25891 }
25892@@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node(
25893 cm_node->rem_mac);
25894
25895 add_hte_node(cm_core, cm_node);
25896- atomic_inc(&cm_nodes_created);
25897+ atomic_inc_unchecked(&cm_nodes_created);
25898
25899 return cm_node;
25900 }
25901@@ -1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm
25902 }
25903
25904 atomic_dec(&cm_core->node_cnt);
25905- atomic_inc(&cm_nodes_destroyed);
25906+ atomic_inc_unchecked(&cm_nodes_destroyed);
25907 nesqp = cm_node->nesqp;
25908 if (nesqp) {
25909 nesqp->cm_node = NULL;
25910@@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm
25911
25912 static void drop_packet(struct sk_buff *skb)
25913 {
25914- atomic_inc(&cm_accel_dropped_pkts);
25915+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
25916 dev_kfree_skb_any(skb);
25917 }
25918
25919@@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm
25920 {
25921
25922 int reset = 0; /* whether to send reset in case of err.. */
25923- atomic_inc(&cm_resets_recvd);
25924+ atomic_inc_unchecked(&cm_resets_recvd);
25925 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
25926 " refcnt=%d\n", cm_node, cm_node->state,
25927 atomic_read(&cm_node->ref_count));
25928@@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_conne
25929 rem_ref_cm_node(cm_node->cm_core, cm_node);
25930 return NULL;
25931 }
25932- atomic_inc(&cm_loopbacks);
25933+ atomic_inc_unchecked(&cm_loopbacks);
25934 loopbackremotenode->loopbackpartner = cm_node;
25935 loopbackremotenode->tcp_cntxt.rcv_wscale =
25936 NES_CM_DEFAULT_RCV_WND_SCALE;
25937@@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_c
25938 add_ref_cm_node(cm_node);
25939 } else if (cm_node->state == NES_CM_STATE_TSA) {
25940 rem_ref_cm_node(cm_core, cm_node);
25941- atomic_inc(&cm_accel_dropped_pkts);
25942+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
25943 dev_kfree_skb_any(skb);
25944 break;
25945 }
25946@@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct ne
25947
25948 if ((cm_id) && (cm_id->event_handler)) {
25949 if (issue_disconn) {
25950- atomic_inc(&cm_disconnects);
25951+ atomic_inc_unchecked(&cm_disconnects);
25952 cm_event.event = IW_CM_EVENT_DISCONNECT;
25953 cm_event.status = disconn_status;
25954 cm_event.local_addr = cm_id->local_addr;
25955@@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct ne
25956 }
25957
25958 if (issue_close) {
25959- atomic_inc(&cm_closes);
25960+ atomic_inc_unchecked(&cm_closes);
25961 nes_disconnect(nesqp, 1);
25962
25963 cm_id->provider_data = nesqp;
25964@@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
25965
25966 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
25967 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
25968- atomic_inc(&cm_accepts);
25969+ atomic_inc_unchecked(&cm_accepts);
25970
25971 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
25972 netdev_refcnt_read(nesvnic->netdev));
25973@@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
25974
25975 struct nes_cm_core *cm_core;
25976
25977- atomic_inc(&cm_rejects);
25978+ atomic_inc_unchecked(&cm_rejects);
25979 cm_node = (struct nes_cm_node *) cm_id->provider_data;
25980 loopback = cm_node->loopbackpartner;
25981 cm_core = cm_node->cm_core;
25982@@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id,
25983 ntohl(cm_id->local_addr.sin_addr.s_addr),
25984 ntohs(cm_id->local_addr.sin_port));
25985
25986- atomic_inc(&cm_connects);
25987+ atomic_inc_unchecked(&cm_connects);
25988 nesqp->active_conn = 1;
25989
25990 /* cache the cm_id in the qp */
25991@@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *c
25992 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
25993 return err;
25994 }
25995- atomic_inc(&cm_listens_created);
25996+ atomic_inc_unchecked(&cm_listens_created);
25997 }
25998
25999 cm_id->add_ref(cm_id);
26000@@ -3278,7 +3278,7 @@ static void cm_event_connected(struct ne
26001 if (nesqp->destroyed) {
26002 return;
26003 }
26004- atomic_inc(&cm_connecteds);
26005+ atomic_inc_unchecked(&cm_connecteds);
26006 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
26007 " local port 0x%04X. jiffies = %lu.\n",
26008 nesqp->hwqp.qp_id,
26009@@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm
26010
26011 cm_id->add_ref(cm_id);
26012 ret = cm_id->event_handler(cm_id, &cm_event);
26013- atomic_inc(&cm_closes);
26014+ atomic_inc_unchecked(&cm_closes);
26015 cm_event.event = IW_CM_EVENT_CLOSE;
26016 cm_event.status = 0;
26017 cm_event.provider_data = cm_id->provider_data;
26018@@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_
26019 return;
26020 cm_id = cm_node->cm_id;
26021
26022- atomic_inc(&cm_connect_reqs);
26023+ atomic_inc_unchecked(&cm_connect_reqs);
26024 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26025 cm_node, cm_id, jiffies);
26026
26027@@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct n
26028 return;
26029 cm_id = cm_node->cm_id;
26030
26031- atomic_inc(&cm_connect_reqs);
26032+ atomic_inc_unchecked(&cm_connect_reqs);
26033 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26034 cm_node, cm_id, jiffies);
26035
26036diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes.h linux-3.0.4/drivers/infiniband/hw/nes/nes.h
26037--- linux-3.0.4/drivers/infiniband/hw/nes/nes.h 2011-07-21 22:17:23.000000000 -0400
26038+++ linux-3.0.4/drivers/infiniband/hw/nes/nes.h 2011-08-23 21:47:55.000000000 -0400
26039@@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
26040 extern unsigned int wqm_quanta;
26041 extern struct list_head nes_adapter_list;
26042
26043-extern atomic_t cm_connects;
26044-extern atomic_t cm_accepts;
26045-extern atomic_t cm_disconnects;
26046-extern atomic_t cm_closes;
26047-extern atomic_t cm_connecteds;
26048-extern atomic_t cm_connect_reqs;
26049-extern atomic_t cm_rejects;
26050-extern atomic_t mod_qp_timouts;
26051-extern atomic_t qps_created;
26052-extern atomic_t qps_destroyed;
26053-extern atomic_t sw_qps_destroyed;
26054+extern atomic_unchecked_t cm_connects;
26055+extern atomic_unchecked_t cm_accepts;
26056+extern atomic_unchecked_t cm_disconnects;
26057+extern atomic_unchecked_t cm_closes;
26058+extern atomic_unchecked_t cm_connecteds;
26059+extern atomic_unchecked_t cm_connect_reqs;
26060+extern atomic_unchecked_t cm_rejects;
26061+extern atomic_unchecked_t mod_qp_timouts;
26062+extern atomic_unchecked_t qps_created;
26063+extern atomic_unchecked_t qps_destroyed;
26064+extern atomic_unchecked_t sw_qps_destroyed;
26065 extern u32 mh_detected;
26066 extern u32 mh_pauses_sent;
26067 extern u32 cm_packets_sent;
26068@@ -194,14 +194,14 @@ extern u32 cm_packets_created;
26069 extern u32 cm_packets_received;
26070 extern u32 cm_packets_dropped;
26071 extern u32 cm_packets_retrans;
26072-extern atomic_t cm_listens_created;
26073-extern atomic_t cm_listens_destroyed;
26074+extern atomic_unchecked_t cm_listens_created;
26075+extern atomic_unchecked_t cm_listens_destroyed;
26076 extern u32 cm_backlog_drops;
26077-extern atomic_t cm_loopbacks;
26078-extern atomic_t cm_nodes_created;
26079-extern atomic_t cm_nodes_destroyed;
26080-extern atomic_t cm_accel_dropped_pkts;
26081-extern atomic_t cm_resets_recvd;
26082+extern atomic_unchecked_t cm_loopbacks;
26083+extern atomic_unchecked_t cm_nodes_created;
26084+extern atomic_unchecked_t cm_nodes_destroyed;
26085+extern atomic_unchecked_t cm_accel_dropped_pkts;
26086+extern atomic_unchecked_t cm_resets_recvd;
26087
26088 extern u32 int_mod_timer_init;
26089 extern u32 int_mod_cq_depth_256;
26090diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c
26091--- linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c 2011-07-21 22:17:23.000000000 -0400
26092+++ linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c 2011-08-23 21:47:55.000000000 -0400
26093@@ -1274,31 +1274,31 @@ static void nes_netdev_get_ethtool_stats
26094 target_stat_values[++index] = mh_detected;
26095 target_stat_values[++index] = mh_pauses_sent;
26096 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
26097- target_stat_values[++index] = atomic_read(&cm_connects);
26098- target_stat_values[++index] = atomic_read(&cm_accepts);
26099- target_stat_values[++index] = atomic_read(&cm_disconnects);
26100- target_stat_values[++index] = atomic_read(&cm_connecteds);
26101- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
26102- target_stat_values[++index] = atomic_read(&cm_rejects);
26103- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
26104- target_stat_values[++index] = atomic_read(&qps_created);
26105- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
26106- target_stat_values[++index] = atomic_read(&qps_destroyed);
26107- target_stat_values[++index] = atomic_read(&cm_closes);
26108+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
26109+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
26110+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
26111+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
26112+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
26113+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
26114+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
26115+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
26116+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
26117+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
26118+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
26119 target_stat_values[++index] = cm_packets_sent;
26120 target_stat_values[++index] = cm_packets_bounced;
26121 target_stat_values[++index] = cm_packets_created;
26122 target_stat_values[++index] = cm_packets_received;
26123 target_stat_values[++index] = cm_packets_dropped;
26124 target_stat_values[++index] = cm_packets_retrans;
26125- target_stat_values[++index] = atomic_read(&cm_listens_created);
26126- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
26127+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
26128+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
26129 target_stat_values[++index] = cm_backlog_drops;
26130- target_stat_values[++index] = atomic_read(&cm_loopbacks);
26131- target_stat_values[++index] = atomic_read(&cm_nodes_created);
26132- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
26133- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
26134- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
26135+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
26136+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
26137+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
26138+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
26139+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
26140 target_stat_values[++index] = nesadapter->free_4kpbl;
26141 target_stat_values[++index] = nesadapter->free_256pbl;
26142 target_stat_values[++index] = int_mod_timer_init;
26143diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c
26144--- linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c 2011-07-21 22:17:23.000000000 -0400
26145+++ linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c 2011-08-23 21:47:55.000000000 -0400
26146@@ -46,9 +46,9 @@
26147
26148 #include <rdma/ib_umem.h>
26149
26150-atomic_t mod_qp_timouts;
26151-atomic_t qps_created;
26152-atomic_t sw_qps_destroyed;
26153+atomic_unchecked_t mod_qp_timouts;
26154+atomic_unchecked_t qps_created;
26155+atomic_unchecked_t sw_qps_destroyed;
26156
26157 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
26158
26159@@ -1141,7 +1141,7 @@ static struct ib_qp *nes_create_qp(struc
26160 if (init_attr->create_flags)
26161 return ERR_PTR(-EINVAL);
26162
26163- atomic_inc(&qps_created);
26164+ atomic_inc_unchecked(&qps_created);
26165 switch (init_attr->qp_type) {
26166 case IB_QPT_RC:
26167 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
26168@@ -1470,7 +1470,7 @@ static int nes_destroy_qp(struct ib_qp *
26169 struct iw_cm_event cm_event;
26170 int ret;
26171
26172- atomic_inc(&sw_qps_destroyed);
26173+ atomic_inc_unchecked(&sw_qps_destroyed);
26174 nesqp->destroyed = 1;
26175
26176 /* Blow away the connection if it exists. */
26177diff -urNp linux-3.0.4/drivers/infiniband/hw/qib/qib.h linux-3.0.4/drivers/infiniband/hw/qib/qib.h
26178--- linux-3.0.4/drivers/infiniband/hw/qib/qib.h 2011-07-21 22:17:23.000000000 -0400
26179+++ linux-3.0.4/drivers/infiniband/hw/qib/qib.h 2011-08-23 21:47:55.000000000 -0400
26180@@ -51,6 +51,7 @@
26181 #include <linux/completion.h>
26182 #include <linux/kref.h>
26183 #include <linux/sched.h>
26184+#include <linux/slab.h>
26185
26186 #include "qib_common.h"
26187 #include "qib_verbs.h"
26188diff -urNp linux-3.0.4/drivers/input/gameport/gameport.c linux-3.0.4/drivers/input/gameport/gameport.c
26189--- linux-3.0.4/drivers/input/gameport/gameport.c 2011-07-21 22:17:23.000000000 -0400
26190+++ linux-3.0.4/drivers/input/gameport/gameport.c 2011-08-23 21:47:55.000000000 -0400
26191@@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
26192 */
26193 static void gameport_init_port(struct gameport *gameport)
26194 {
26195- static atomic_t gameport_no = ATOMIC_INIT(0);
26196+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
26197
26198 __module_get(THIS_MODULE);
26199
26200 mutex_init(&gameport->drv_mutex);
26201 device_initialize(&gameport->dev);
26202 dev_set_name(&gameport->dev, "gameport%lu",
26203- (unsigned long)atomic_inc_return(&gameport_no) - 1);
26204+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
26205 gameport->dev.bus = &gameport_bus;
26206 gameport->dev.release = gameport_release_port;
26207 if (gameport->parent)
26208diff -urNp linux-3.0.4/drivers/input/input.c linux-3.0.4/drivers/input/input.c
26209--- linux-3.0.4/drivers/input/input.c 2011-07-21 22:17:23.000000000 -0400
26210+++ linux-3.0.4/drivers/input/input.c 2011-08-23 21:47:55.000000000 -0400
26211@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struc
26212 */
26213 int input_register_device(struct input_dev *dev)
26214 {
26215- static atomic_t input_no = ATOMIC_INIT(0);
26216+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
26217 struct input_handler *handler;
26218 const char *path;
26219 int error;
26220@@ -1851,7 +1851,7 @@ int input_register_device(struct input_d
26221 dev->setkeycode = input_default_setkeycode;
26222
26223 dev_set_name(&dev->dev, "input%ld",
26224- (unsigned long) atomic_inc_return(&input_no) - 1);
26225+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
26226
26227 error = device_add(&dev->dev);
26228 if (error)
26229diff -urNp linux-3.0.4/drivers/input/joystick/sidewinder.c linux-3.0.4/drivers/input/joystick/sidewinder.c
26230--- linux-3.0.4/drivers/input/joystick/sidewinder.c 2011-07-21 22:17:23.000000000 -0400
26231+++ linux-3.0.4/drivers/input/joystick/sidewinder.c 2011-08-23 21:48:14.000000000 -0400
26232@@ -30,6 +30,7 @@
26233 #include <linux/kernel.h>
26234 #include <linux/module.h>
26235 #include <linux/slab.h>
26236+#include <linux/sched.h>
26237 #include <linux/init.h>
26238 #include <linux/input.h>
26239 #include <linux/gameport.h>
26240@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
26241 unsigned char buf[SW_LENGTH];
26242 int i;
26243
26244+ pax_track_stack();
26245+
26246 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
26247
26248 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
26249diff -urNp linux-3.0.4/drivers/input/joystick/xpad.c linux-3.0.4/drivers/input/joystick/xpad.c
26250--- linux-3.0.4/drivers/input/joystick/xpad.c 2011-07-21 22:17:23.000000000 -0400
26251+++ linux-3.0.4/drivers/input/joystick/xpad.c 2011-08-23 21:47:55.000000000 -0400
26252@@ -689,7 +689,7 @@ static void xpad_led_set(struct led_clas
26253
26254 static int xpad_led_probe(struct usb_xpad *xpad)
26255 {
26256- static atomic_t led_seq = ATOMIC_INIT(0);
26257+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
26258 long led_no;
26259 struct xpad_led *led;
26260 struct led_classdev *led_cdev;
26261@@ -702,7 +702,7 @@ static int xpad_led_probe(struct usb_xpa
26262 if (!led)
26263 return -ENOMEM;
26264
26265- led_no = (long)atomic_inc_return(&led_seq) - 1;
26266+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
26267
26268 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
26269 led->xpad = xpad;
26270diff -urNp linux-3.0.4/drivers/input/mousedev.c linux-3.0.4/drivers/input/mousedev.c
26271--- linux-3.0.4/drivers/input/mousedev.c 2011-07-21 22:17:23.000000000 -0400
26272+++ linux-3.0.4/drivers/input/mousedev.c 2011-08-23 21:47:55.000000000 -0400
26273@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file
26274
26275 spin_unlock_irq(&client->packet_lock);
26276
26277- if (copy_to_user(buffer, data, count))
26278+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
26279 return -EFAULT;
26280
26281 return count;
26282diff -urNp linux-3.0.4/drivers/input/serio/serio.c linux-3.0.4/drivers/input/serio/serio.c
26283--- linux-3.0.4/drivers/input/serio/serio.c 2011-07-21 22:17:23.000000000 -0400
26284+++ linux-3.0.4/drivers/input/serio/serio.c 2011-08-23 21:47:55.000000000 -0400
26285@@ -497,7 +497,7 @@ static void serio_release_port(struct de
26286 */
26287 static void serio_init_port(struct serio *serio)
26288 {
26289- static atomic_t serio_no = ATOMIC_INIT(0);
26290+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
26291
26292 __module_get(THIS_MODULE);
26293
26294@@ -508,7 +508,7 @@ static void serio_init_port(struct serio
26295 mutex_init(&serio->drv_mutex);
26296 device_initialize(&serio->dev);
26297 dev_set_name(&serio->dev, "serio%ld",
26298- (long)atomic_inc_return(&serio_no) - 1);
26299+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
26300 serio->dev.bus = &serio_bus;
26301 serio->dev.release = serio_release_port;
26302 serio->dev.groups = serio_device_attr_groups;
26303diff -urNp linux-3.0.4/drivers/isdn/capi/capi.c linux-3.0.4/drivers/isdn/capi/capi.c
26304--- linux-3.0.4/drivers/isdn/capi/capi.c 2011-07-21 22:17:23.000000000 -0400
26305+++ linux-3.0.4/drivers/isdn/capi/capi.c 2011-08-23 21:47:55.000000000 -0400
26306@@ -83,8 +83,8 @@ struct capiminor {
26307
26308 struct capi20_appl *ap;
26309 u32 ncci;
26310- atomic_t datahandle;
26311- atomic_t msgid;
26312+ atomic_unchecked_t datahandle;
26313+ atomic_unchecked_t msgid;
26314
26315 struct tty_port port;
26316 int ttyinstop;
26317@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *m
26318 capimsg_setu16(s, 2, mp->ap->applid);
26319 capimsg_setu8 (s, 4, CAPI_DATA_B3);
26320 capimsg_setu8 (s, 5, CAPI_RESP);
26321- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
26322+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
26323 capimsg_setu32(s, 8, mp->ncci);
26324 capimsg_setu16(s, 12, datahandle);
26325 }
26326@@ -518,14 +518,14 @@ static void handle_minor_send(struct cap
26327 mp->outbytes -= len;
26328 spin_unlock_bh(&mp->outlock);
26329
26330- datahandle = atomic_inc_return(&mp->datahandle);
26331+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
26332 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
26333 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26334 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26335 capimsg_setu16(skb->data, 2, mp->ap->applid);
26336 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
26337 capimsg_setu8 (skb->data, 5, CAPI_REQ);
26338- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
26339+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
26340 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
26341 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
26342 capimsg_setu16(skb->data, 16, len); /* Data length */
26343diff -urNp linux-3.0.4/drivers/isdn/gigaset/common.c linux-3.0.4/drivers/isdn/gigaset/common.c
26344--- linux-3.0.4/drivers/isdn/gigaset/common.c 2011-07-21 22:17:23.000000000 -0400
26345+++ linux-3.0.4/drivers/isdn/gigaset/common.c 2011-08-23 21:47:55.000000000 -0400
26346@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct
26347 cs->commands_pending = 0;
26348 cs->cur_at_seq = 0;
26349 cs->gotfwver = -1;
26350- cs->open_count = 0;
26351+ local_set(&cs->open_count, 0);
26352 cs->dev = NULL;
26353 cs->tty = NULL;
26354 cs->tty_dev = NULL;
26355diff -urNp linux-3.0.4/drivers/isdn/gigaset/gigaset.h linux-3.0.4/drivers/isdn/gigaset/gigaset.h
26356--- linux-3.0.4/drivers/isdn/gigaset/gigaset.h 2011-07-21 22:17:23.000000000 -0400
26357+++ linux-3.0.4/drivers/isdn/gigaset/gigaset.h 2011-08-23 21:47:55.000000000 -0400
26358@@ -35,6 +35,7 @@
26359 #include <linux/tty_driver.h>
26360 #include <linux/list.h>
26361 #include <asm/atomic.h>
26362+#include <asm/local.h>
26363
26364 #define GIG_VERSION {0, 5, 0, 0}
26365 #define GIG_COMPAT {0, 4, 0, 0}
26366@@ -433,7 +434,7 @@ struct cardstate {
26367 spinlock_t cmdlock;
26368 unsigned curlen, cmdbytes;
26369
26370- unsigned open_count;
26371+ local_t open_count;
26372 struct tty_struct *tty;
26373 struct tasklet_struct if_wake_tasklet;
26374 unsigned control_state;
26375diff -urNp linux-3.0.4/drivers/isdn/gigaset/interface.c linux-3.0.4/drivers/isdn/gigaset/interface.c
26376--- linux-3.0.4/drivers/isdn/gigaset/interface.c 2011-07-21 22:17:23.000000000 -0400
26377+++ linux-3.0.4/drivers/isdn/gigaset/interface.c 2011-08-23 21:47:55.000000000 -0400
26378@@ -162,9 +162,7 @@ static int if_open(struct tty_struct *tt
26379 }
26380 tty->driver_data = cs;
26381
26382- ++cs->open_count;
26383-
26384- if (cs->open_count == 1) {
26385+ if (local_inc_return(&cs->open_count) == 1) {
26386 spin_lock_irqsave(&cs->lock, flags);
26387 cs->tty = tty;
26388 spin_unlock_irqrestore(&cs->lock, flags);
26389@@ -192,10 +190,10 @@ static void if_close(struct tty_struct *
26390
26391 if (!cs->connected)
26392 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26393- else if (!cs->open_count)
26394+ else if (!local_read(&cs->open_count))
26395 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26396 else {
26397- if (!--cs->open_count) {
26398+ if (!local_dec_return(&cs->open_count)) {
26399 spin_lock_irqsave(&cs->lock, flags);
26400 cs->tty = NULL;
26401 spin_unlock_irqrestore(&cs->lock, flags);
26402@@ -230,7 +228,7 @@ static int if_ioctl(struct tty_struct *t
26403 if (!cs->connected) {
26404 gig_dbg(DEBUG_IF, "not connected");
26405 retval = -ENODEV;
26406- } else if (!cs->open_count)
26407+ } else if (!local_read(&cs->open_count))
26408 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26409 else {
26410 retval = 0;
26411@@ -360,7 +358,7 @@ static int if_write(struct tty_struct *t
26412 retval = -ENODEV;
26413 goto done;
26414 }
26415- if (!cs->open_count) {
26416+ if (!local_read(&cs->open_count)) {
26417 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26418 retval = -ENODEV;
26419 goto done;
26420@@ -413,7 +411,7 @@ static int if_write_room(struct tty_stru
26421 if (!cs->connected) {
26422 gig_dbg(DEBUG_IF, "not connected");
26423 retval = -ENODEV;
26424- } else if (!cs->open_count)
26425+ } else if (!local_read(&cs->open_count))
26426 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26427 else if (cs->mstate != MS_LOCKED) {
26428 dev_warn(cs->dev, "can't write to unlocked device\n");
26429@@ -443,7 +441,7 @@ static int if_chars_in_buffer(struct tty
26430
26431 if (!cs->connected)
26432 gig_dbg(DEBUG_IF, "not connected");
26433- else if (!cs->open_count)
26434+ else if (!local_read(&cs->open_count))
26435 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26436 else if (cs->mstate != MS_LOCKED)
26437 dev_warn(cs->dev, "can't write to unlocked device\n");
26438@@ -471,7 +469,7 @@ static void if_throttle(struct tty_struc
26439
26440 if (!cs->connected)
26441 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26442- else if (!cs->open_count)
26443+ else if (!local_read(&cs->open_count))
26444 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26445 else
26446 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
26447@@ -495,7 +493,7 @@ static void if_unthrottle(struct tty_str
26448
26449 if (!cs->connected)
26450 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26451- else if (!cs->open_count)
26452+ else if (!local_read(&cs->open_count))
26453 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26454 else
26455 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
26456@@ -526,7 +524,7 @@ static void if_set_termios(struct tty_st
26457 goto out;
26458 }
26459
26460- if (!cs->open_count) {
26461+ if (!local_read(&cs->open_count)) {
26462 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26463 goto out;
26464 }
26465diff -urNp linux-3.0.4/drivers/isdn/hardware/avm/b1.c linux-3.0.4/drivers/isdn/hardware/avm/b1.c
26466--- linux-3.0.4/drivers/isdn/hardware/avm/b1.c 2011-07-21 22:17:23.000000000 -0400
26467+++ linux-3.0.4/drivers/isdn/hardware/avm/b1.c 2011-08-23 21:47:55.000000000 -0400
26468@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
26469 }
26470 if (left) {
26471 if (t4file->user) {
26472- if (copy_from_user(buf, dp, left))
26473+ if (left > sizeof buf || copy_from_user(buf, dp, left))
26474 return -EFAULT;
26475 } else {
26476 memcpy(buf, dp, left);
26477@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
26478 }
26479 if (left) {
26480 if (config->user) {
26481- if (copy_from_user(buf, dp, left))
26482+ if (left > sizeof buf || copy_from_user(buf, dp, left))
26483 return -EFAULT;
26484 } else {
26485 memcpy(buf, dp, left);
26486diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c
26487--- linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c 2011-07-21 22:17:23.000000000 -0400
26488+++ linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c 2011-08-23 21:48:14.000000000 -0400
26489@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
26490 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
26491 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
26492
26493+ pax_track_stack();
26494
26495 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
26496 {
26497diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c
26498--- linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c 2011-07-21 22:17:23.000000000 -0400
26499+++ linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c 2011-08-23 21:48:14.000000000 -0400
26500@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
26501 IDI_SYNC_REQ req;
26502 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26503
26504+ pax_track_stack();
26505+
26506 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26507
26508 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26509diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c
26510--- linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c 2011-07-21 22:17:23.000000000 -0400
26511+++ linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c 2011-08-23 21:48:14.000000000 -0400
26512@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
26513 IDI_SYNC_REQ req;
26514 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26515
26516+ pax_track_stack();
26517+
26518 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26519
26520 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26521diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c
26522--- linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c 2011-07-21 22:17:23.000000000 -0400
26523+++ linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c 2011-08-23 21:48:14.000000000 -0400
26524@@ -160,6 +160,8 @@ static int DIVA_INIT_FUNCTION connect_di
26525 IDI_SYNC_REQ req;
26526 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26527
26528+ pax_track_stack();
26529+
26530 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26531
26532 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26533diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h
26534--- linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h 2011-07-21 22:17:23.000000000 -0400
26535+++ linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h 2011-08-23 21:47:55.000000000 -0400
26536@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
26537 } diva_didd_add_adapter_t;
26538 typedef struct _diva_didd_remove_adapter {
26539 IDI_CALL p_request;
26540-} diva_didd_remove_adapter_t;
26541+} __no_const diva_didd_remove_adapter_t;
26542 typedef struct _diva_didd_read_adapter_array {
26543 void * buffer;
26544 dword length;
26545diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c
26546--- linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c 2011-07-21 22:17:23.000000000 -0400
26547+++ linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c 2011-08-23 21:48:14.000000000 -0400
26548@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
26549 IDI_SYNC_REQ req;
26550 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26551
26552+ pax_track_stack();
26553+
26554 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26555
26556 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26557diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/message.c linux-3.0.4/drivers/isdn/hardware/eicon/message.c
26558--- linux-3.0.4/drivers/isdn/hardware/eicon/message.c 2011-07-21 22:17:23.000000000 -0400
26559+++ linux-3.0.4/drivers/isdn/hardware/eicon/message.c 2011-08-23 21:48:14.000000000 -0400
26560@@ -4886,6 +4886,8 @@ static void sig_ind(PLCI *plci)
26561 dword d;
26562 word w;
26563
26564+ pax_track_stack();
26565+
26566 a = plci->adapter;
26567 Id = ((word)plci->Id<<8)|a->Id;
26568 PUT_WORD(&SS_Ind[4],0x0000);
26569@@ -7480,6 +7482,8 @@ static word add_b1(PLCI *plci, API_PARSE
26570 word j, n, w;
26571 dword d;
26572
26573+ pax_track_stack();
26574+
26575
26576 for(i=0;i<8;i++) bp_parms[i].length = 0;
26577 for(i=0;i<2;i++) global_config[i].length = 0;
26578@@ -7954,6 +7958,8 @@ static word add_b23(PLCI *plci, API_PARS
26579 const byte llc3[] = {4,3,2,2,6,6,0};
26580 const byte header[] = {0,2,3,3,0,0,0};
26581
26582+ pax_track_stack();
26583+
26584 for(i=0;i<8;i++) bp_parms[i].length = 0;
26585 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
26586 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
26587@@ -14741,6 +14747,8 @@ static void group_optimization(DIVA_CAPI
26588 word appl_number_group_type[MAX_APPL];
26589 PLCI *auxplci;
26590
26591+ pax_track_stack();
26592+
26593 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
26594
26595 if(!a->group_optimization_enabled)
26596diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c
26597--- linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c 2011-07-21 22:17:23.000000000 -0400
26598+++ linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c 2011-08-23 21:48:14.000000000 -0400
26599@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
26600 IDI_SYNC_REQ req;
26601 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26602
26603+ pax_track_stack();
26604+
26605 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26606
26607 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26608diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h
26609--- linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-07-21 22:17:23.000000000 -0400
26610+++ linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-23 21:47:55.000000000 -0400
26611@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
26612 typedef struct _diva_os_idi_adapter_interface {
26613 diva_init_card_proc_t cleanup_adapter_proc;
26614 diva_cmd_card_proc_t cmd_proc;
26615-} diva_os_idi_adapter_interface_t;
26616+} __no_const diva_os_idi_adapter_interface_t;
26617
26618 typedef struct _diva_os_xdi_adapter {
26619 struct list_head link;
26620diff -urNp linux-3.0.4/drivers/isdn/i4l/isdn_common.c linux-3.0.4/drivers/isdn/i4l/isdn_common.c
26621--- linux-3.0.4/drivers/isdn/i4l/isdn_common.c 2011-07-21 22:17:23.000000000 -0400
26622+++ linux-3.0.4/drivers/isdn/i4l/isdn_common.c 2011-08-23 21:48:14.000000000 -0400
26623@@ -1286,6 +1286,8 @@ isdn_ioctl(struct file *file, uint cmd,
26624 } iocpar;
26625 void __user *argp = (void __user *)arg;
26626
26627+ pax_track_stack();
26628+
26629 #define name iocpar.name
26630 #define bname iocpar.bname
26631 #define iocts iocpar.iocts
26632diff -urNp linux-3.0.4/drivers/isdn/icn/icn.c linux-3.0.4/drivers/isdn/icn/icn.c
26633--- linux-3.0.4/drivers/isdn/icn/icn.c 2011-07-21 22:17:23.000000000 -0400
26634+++ linux-3.0.4/drivers/isdn/icn/icn.c 2011-08-23 21:47:55.000000000 -0400
26635@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len
26636 if (count > len)
26637 count = len;
26638 if (user) {
26639- if (copy_from_user(msg, buf, count))
26640+ if (count > sizeof msg || copy_from_user(msg, buf, count))
26641 return -EFAULT;
26642 } else
26643 memcpy(msg, buf, count);
26644diff -urNp linux-3.0.4/drivers/lguest/core.c linux-3.0.4/drivers/lguest/core.c
26645--- linux-3.0.4/drivers/lguest/core.c 2011-07-21 22:17:23.000000000 -0400
26646+++ linux-3.0.4/drivers/lguest/core.c 2011-08-23 21:47:55.000000000 -0400
26647@@ -92,9 +92,17 @@ static __init int map_switcher(void)
26648 * it's worked so far. The end address needs +1 because __get_vm_area
26649 * allocates an extra guard page, so we need space for that.
26650 */
26651+
26652+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26653+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
26654+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
26655+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
26656+#else
26657 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
26658 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
26659 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
26660+#endif
26661+
26662 if (!switcher_vma) {
26663 err = -ENOMEM;
26664 printk("lguest: could not map switcher pages high\n");
26665@@ -119,7 +127,7 @@ static __init int map_switcher(void)
26666 * Now the Switcher is mapped at the right address, we can't fail!
26667 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
26668 */
26669- memcpy(switcher_vma->addr, start_switcher_text,
26670+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
26671 end_switcher_text - start_switcher_text);
26672
26673 printk(KERN_INFO "lguest: mapped switcher at %p\n",
26674diff -urNp linux-3.0.4/drivers/lguest/x86/core.c linux-3.0.4/drivers/lguest/x86/core.c
26675--- linux-3.0.4/drivers/lguest/x86/core.c 2011-07-21 22:17:23.000000000 -0400
26676+++ linux-3.0.4/drivers/lguest/x86/core.c 2011-08-23 21:47:55.000000000 -0400
26677@@ -59,7 +59,7 @@ static struct {
26678 /* Offset from where switcher.S was compiled to where we've copied it */
26679 static unsigned long switcher_offset(void)
26680 {
26681- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
26682+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
26683 }
26684
26685 /* This cpu's struct lguest_pages. */
26686@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
26687 * These copies are pretty cheap, so we do them unconditionally: */
26688 /* Save the current Host top-level page directory.
26689 */
26690+
26691+#ifdef CONFIG_PAX_PER_CPU_PGD
26692+ pages->state.host_cr3 = read_cr3();
26693+#else
26694 pages->state.host_cr3 = __pa(current->mm->pgd);
26695+#endif
26696+
26697 /*
26698 * Set up the Guest's page tables to see this CPU's pages (and no
26699 * other CPU's pages).
26700@@ -547,7 +553,7 @@ void __init lguest_arch_host_init(void)
26701 * compiled-in switcher code and the high-mapped copy we just made.
26702 */
26703 for (i = 0; i < IDT_ENTRIES; i++)
26704- default_idt_entries[i] += switcher_offset();
26705+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
26706
26707 /*
26708 * Set up the Switcher's per-cpu areas.
26709@@ -630,7 +636,7 @@ void __init lguest_arch_host_init(void)
26710 * it will be undisturbed when we switch. To change %cs and jump we
26711 * need this structure to feed to Intel's "lcall" instruction.
26712 */
26713- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
26714+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
26715 lguest_entry.segment = LGUEST_CS;
26716
26717 /*
26718diff -urNp linux-3.0.4/drivers/lguest/x86/switcher_32.S linux-3.0.4/drivers/lguest/x86/switcher_32.S
26719--- linux-3.0.4/drivers/lguest/x86/switcher_32.S 2011-07-21 22:17:23.000000000 -0400
26720+++ linux-3.0.4/drivers/lguest/x86/switcher_32.S 2011-08-23 21:47:55.000000000 -0400
26721@@ -87,6 +87,7 @@
26722 #include <asm/page.h>
26723 #include <asm/segment.h>
26724 #include <asm/lguest.h>
26725+#include <asm/processor-flags.h>
26726
26727 // We mark the start of the code to copy
26728 // It's placed in .text tho it's never run here
26729@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
26730 // Changes type when we load it: damn Intel!
26731 // For after we switch over our page tables
26732 // That entry will be read-only: we'd crash.
26733+
26734+#ifdef CONFIG_PAX_KERNEXEC
26735+ mov %cr0, %edx
26736+ xor $X86_CR0_WP, %edx
26737+ mov %edx, %cr0
26738+#endif
26739+
26740 movl $(GDT_ENTRY_TSS*8), %edx
26741 ltr %dx
26742
26743@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
26744 // Let's clear it again for our return.
26745 // The GDT descriptor of the Host
26746 // Points to the table after two "size" bytes
26747- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
26748+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
26749 // Clear "used" from type field (byte 5, bit 2)
26750- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
26751+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
26752+
26753+#ifdef CONFIG_PAX_KERNEXEC
26754+ mov %cr0, %eax
26755+ xor $X86_CR0_WP, %eax
26756+ mov %eax, %cr0
26757+#endif
26758
26759 // Once our page table's switched, the Guest is live!
26760 // The Host fades as we run this final step.
26761@@ -295,13 +309,12 @@ deliver_to_host:
26762 // I consulted gcc, and it gave
26763 // These instructions, which I gladly credit:
26764 leal (%edx,%ebx,8), %eax
26765- movzwl (%eax),%edx
26766- movl 4(%eax), %eax
26767- xorw %ax, %ax
26768- orl %eax, %edx
26769+ movl 4(%eax), %edx
26770+ movw (%eax), %dx
26771 // Now the address of the handler's in %edx
26772 // We call it now: its "iret" drops us home.
26773- jmp *%edx
26774+ ljmp $__KERNEL_CS, $1f
26775+1: jmp *%edx
26776
26777 // Every interrupt can come to us here
26778 // But we must truly tell each apart.
26779diff -urNp linux-3.0.4/drivers/md/dm.c linux-3.0.4/drivers/md/dm.c
26780--- linux-3.0.4/drivers/md/dm.c 2011-08-23 21:44:40.000000000 -0400
26781+++ linux-3.0.4/drivers/md/dm.c 2011-08-23 21:47:55.000000000 -0400
26782@@ -164,9 +164,9 @@ struct mapped_device {
26783 /*
26784 * Event handling.
26785 */
26786- atomic_t event_nr;
26787+ atomic_unchecked_t event_nr;
26788 wait_queue_head_t eventq;
26789- atomic_t uevent_seq;
26790+ atomic_unchecked_t uevent_seq;
26791 struct list_head uevent_list;
26792 spinlock_t uevent_lock; /* Protect access to uevent_list */
26793
26794@@ -1842,8 +1842,8 @@ static struct mapped_device *alloc_dev(i
26795 rwlock_init(&md->map_lock);
26796 atomic_set(&md->holders, 1);
26797 atomic_set(&md->open_count, 0);
26798- atomic_set(&md->event_nr, 0);
26799- atomic_set(&md->uevent_seq, 0);
26800+ atomic_set_unchecked(&md->event_nr, 0);
26801+ atomic_set_unchecked(&md->uevent_seq, 0);
26802 INIT_LIST_HEAD(&md->uevent_list);
26803 spin_lock_init(&md->uevent_lock);
26804
26805@@ -1977,7 +1977,7 @@ static void event_callback(void *context
26806
26807 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
26808
26809- atomic_inc(&md->event_nr);
26810+ atomic_inc_unchecked(&md->event_nr);
26811 wake_up(&md->eventq);
26812 }
26813
26814@@ -2553,18 +2553,18 @@ int dm_kobject_uevent(struct mapped_devi
26815
26816 uint32_t dm_next_uevent_seq(struct mapped_device *md)
26817 {
26818- return atomic_add_return(1, &md->uevent_seq);
26819+ return atomic_add_return_unchecked(1, &md->uevent_seq);
26820 }
26821
26822 uint32_t dm_get_event_nr(struct mapped_device *md)
26823 {
26824- return atomic_read(&md->event_nr);
26825+ return atomic_read_unchecked(&md->event_nr);
26826 }
26827
26828 int dm_wait_event(struct mapped_device *md, int event_nr)
26829 {
26830 return wait_event_interruptible(md->eventq,
26831- (event_nr != atomic_read(&md->event_nr)));
26832+ (event_nr != atomic_read_unchecked(&md->event_nr)));
26833 }
26834
26835 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
26836diff -urNp linux-3.0.4/drivers/md/dm-ioctl.c linux-3.0.4/drivers/md/dm-ioctl.c
26837--- linux-3.0.4/drivers/md/dm-ioctl.c 2011-07-21 22:17:23.000000000 -0400
26838+++ linux-3.0.4/drivers/md/dm-ioctl.c 2011-08-23 21:47:55.000000000 -0400
26839@@ -1551,7 +1551,7 @@ static int validate_params(uint cmd, str
26840 cmd == DM_LIST_VERSIONS_CMD)
26841 return 0;
26842
26843- if ((cmd == DM_DEV_CREATE_CMD)) {
26844+ if (cmd == DM_DEV_CREATE_CMD) {
26845 if (!*param->name) {
26846 DMWARN("name not supplied when creating device");
26847 return -EINVAL;
26848diff -urNp linux-3.0.4/drivers/md/dm-raid1.c linux-3.0.4/drivers/md/dm-raid1.c
26849--- linux-3.0.4/drivers/md/dm-raid1.c 2011-07-21 22:17:23.000000000 -0400
26850+++ linux-3.0.4/drivers/md/dm-raid1.c 2011-08-23 21:47:55.000000000 -0400
26851@@ -40,7 +40,7 @@ enum dm_raid1_error {
26852
26853 struct mirror {
26854 struct mirror_set *ms;
26855- atomic_t error_count;
26856+ atomic_unchecked_t error_count;
26857 unsigned long error_type;
26858 struct dm_dev *dev;
26859 sector_t offset;
26860@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(s
26861 struct mirror *m;
26862
26863 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
26864- if (!atomic_read(&m->error_count))
26865+ if (!atomic_read_unchecked(&m->error_count))
26866 return m;
26867
26868 return NULL;
26869@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m
26870 * simple way to tell if a device has encountered
26871 * errors.
26872 */
26873- atomic_inc(&m->error_count);
26874+ atomic_inc_unchecked(&m->error_count);
26875
26876 if (test_and_set_bit(error_type, &m->error_type))
26877 return;
26878@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(stru
26879 struct mirror *m = get_default_mirror(ms);
26880
26881 do {
26882- if (likely(!atomic_read(&m->error_count)))
26883+ if (likely(!atomic_read_unchecked(&m->error_count)))
26884 return m;
26885
26886 if (m-- == ms->mirror)
26887@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
26888 {
26889 struct mirror *default_mirror = get_default_mirror(m->ms);
26890
26891- return !atomic_read(&default_mirror->error_count);
26892+ return !atomic_read_unchecked(&default_mirror->error_count);
26893 }
26894
26895 static int mirror_available(struct mirror_set *ms, struct bio *bio)
26896@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *
26897 */
26898 if (likely(region_in_sync(ms, region, 1)))
26899 m = choose_mirror(ms, bio->bi_sector);
26900- else if (m && atomic_read(&m->error_count))
26901+ else if (m && atomic_read_unchecked(&m->error_count))
26902 m = NULL;
26903
26904 if (likely(m))
26905@@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set
26906 }
26907
26908 ms->mirror[mirror].ms = ms;
26909- atomic_set(&(ms->mirror[mirror].error_count), 0);
26910+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
26911 ms->mirror[mirror].error_type = 0;
26912 ms->mirror[mirror].offset = offset;
26913
26914@@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_targ
26915 */
26916 static char device_status_char(struct mirror *m)
26917 {
26918- if (!atomic_read(&(m->error_count)))
26919+ if (!atomic_read_unchecked(&(m->error_count)))
26920 return 'A';
26921
26922 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
26923diff -urNp linux-3.0.4/drivers/md/dm-stripe.c linux-3.0.4/drivers/md/dm-stripe.c
26924--- linux-3.0.4/drivers/md/dm-stripe.c 2011-07-21 22:17:23.000000000 -0400
26925+++ linux-3.0.4/drivers/md/dm-stripe.c 2011-08-23 21:47:55.000000000 -0400
26926@@ -20,7 +20,7 @@ struct stripe {
26927 struct dm_dev *dev;
26928 sector_t physical_start;
26929
26930- atomic_t error_count;
26931+ atomic_unchecked_t error_count;
26932 };
26933
26934 struct stripe_c {
26935@@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *
26936 kfree(sc);
26937 return r;
26938 }
26939- atomic_set(&(sc->stripe[i].error_count), 0);
26940+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
26941 }
26942
26943 ti->private = sc;
26944@@ -314,7 +314,7 @@ static int stripe_status(struct dm_targe
26945 DMEMIT("%d ", sc->stripes);
26946 for (i = 0; i < sc->stripes; i++) {
26947 DMEMIT("%s ", sc->stripe[i].dev->name);
26948- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
26949+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
26950 'D' : 'A';
26951 }
26952 buffer[i] = '\0';
26953@@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_targe
26954 */
26955 for (i = 0; i < sc->stripes; i++)
26956 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
26957- atomic_inc(&(sc->stripe[i].error_count));
26958- if (atomic_read(&(sc->stripe[i].error_count)) <
26959+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
26960+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
26961 DM_IO_ERROR_THRESHOLD)
26962 schedule_work(&sc->trigger_event);
26963 }
26964diff -urNp linux-3.0.4/drivers/md/dm-table.c linux-3.0.4/drivers/md/dm-table.c
26965--- linux-3.0.4/drivers/md/dm-table.c 2011-07-21 22:17:23.000000000 -0400
26966+++ linux-3.0.4/drivers/md/dm-table.c 2011-08-23 21:47:55.000000000 -0400
26967@@ -390,7 +390,7 @@ static int device_area_is_invalid(struct
26968 if (!dev_size)
26969 return 0;
26970
26971- if ((start >= dev_size) || (start + len > dev_size)) {
26972+ if ((start >= dev_size) || (len > dev_size - start)) {
26973 DMWARN("%s: %s too small for target: "
26974 "start=%llu, len=%llu, dev_size=%llu",
26975 dm_device_name(ti->table->md), bdevname(bdev, b),
26976diff -urNp linux-3.0.4/drivers/md/md.c linux-3.0.4/drivers/md/md.c
26977--- linux-3.0.4/drivers/md/md.c 2011-07-21 22:17:23.000000000 -0400
26978+++ linux-3.0.4/drivers/md/md.c 2011-08-23 21:47:55.000000000 -0400
26979@@ -226,10 +226,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
26980 * start build, activate spare
26981 */
26982 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
26983-static atomic_t md_event_count;
26984+static atomic_unchecked_t md_event_count;
26985 void md_new_event(mddev_t *mddev)
26986 {
26987- atomic_inc(&md_event_count);
26988+ atomic_inc_unchecked(&md_event_count);
26989 wake_up(&md_event_waiters);
26990 }
26991 EXPORT_SYMBOL_GPL(md_new_event);
26992@@ -239,7 +239,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
26993 */
26994 static void md_new_event_inintr(mddev_t *mddev)
26995 {
26996- atomic_inc(&md_event_count);
26997+ atomic_inc_unchecked(&md_event_count);
26998 wake_up(&md_event_waiters);
26999 }
27000
27001@@ -1457,7 +1457,7 @@ static int super_1_load(mdk_rdev_t *rdev
27002
27003 rdev->preferred_minor = 0xffff;
27004 rdev->data_offset = le64_to_cpu(sb->data_offset);
27005- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27006+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27007
27008 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
27009 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
27010@@ -1635,7 +1635,7 @@ static void super_1_sync(mddev_t *mddev,
27011 else
27012 sb->resync_offset = cpu_to_le64(0);
27013
27014- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
27015+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
27016
27017 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
27018 sb->size = cpu_to_le64(mddev->dev_sectors);
27019@@ -2428,7 +2428,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
27020 static ssize_t
27021 errors_show(mdk_rdev_t *rdev, char *page)
27022 {
27023- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
27024+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
27025 }
27026
27027 static ssize_t
27028@@ -2437,7 +2437,7 @@ errors_store(mdk_rdev_t *rdev, const cha
27029 char *e;
27030 unsigned long n = simple_strtoul(buf, &e, 10);
27031 if (*buf && (*e == 0 || *e == '\n')) {
27032- atomic_set(&rdev->corrected_errors, n);
27033+ atomic_set_unchecked(&rdev->corrected_errors, n);
27034 return len;
27035 }
27036 return -EINVAL;
27037@@ -2793,8 +2793,8 @@ void md_rdev_init(mdk_rdev_t *rdev)
27038 rdev->last_read_error.tv_sec = 0;
27039 rdev->last_read_error.tv_nsec = 0;
27040 atomic_set(&rdev->nr_pending, 0);
27041- atomic_set(&rdev->read_errors, 0);
27042- atomic_set(&rdev->corrected_errors, 0);
27043+ atomic_set_unchecked(&rdev->read_errors, 0);
27044+ atomic_set_unchecked(&rdev->corrected_errors, 0);
27045
27046 INIT_LIST_HEAD(&rdev->same_set);
27047 init_waitqueue_head(&rdev->blocked_wait);
27048@@ -6415,7 +6415,7 @@ static int md_seq_show(struct seq_file *
27049
27050 spin_unlock(&pers_lock);
27051 seq_printf(seq, "\n");
27052- mi->event = atomic_read(&md_event_count);
27053+ mi->event = atomic_read_unchecked(&md_event_count);
27054 return 0;
27055 }
27056 if (v == (void*)2) {
27057@@ -6504,7 +6504,7 @@ static int md_seq_show(struct seq_file *
27058 chunk_kb ? "KB" : "B");
27059 if (bitmap->file) {
27060 seq_printf(seq, ", file: ");
27061- seq_path(seq, &bitmap->file->f_path, " \t\n");
27062+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
27063 }
27064
27065 seq_printf(seq, "\n");
27066@@ -6538,7 +6538,7 @@ static int md_seq_open(struct inode *ino
27067 else {
27068 struct seq_file *p = file->private_data;
27069 p->private = mi;
27070- mi->event = atomic_read(&md_event_count);
27071+ mi->event = atomic_read_unchecked(&md_event_count);
27072 }
27073 return error;
27074 }
27075@@ -6554,7 +6554,7 @@ static unsigned int mdstat_poll(struct f
27076 /* always allow read */
27077 mask = POLLIN | POLLRDNORM;
27078
27079- if (mi->event != atomic_read(&md_event_count))
27080+ if (mi->event != atomic_read_unchecked(&md_event_count))
27081 mask |= POLLERR | POLLPRI;
27082 return mask;
27083 }
27084@@ -6598,7 +6598,7 @@ static int is_mddev_idle(mddev_t *mddev,
27085 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
27086 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
27087 (int)part_stat_read(&disk->part0, sectors[1]) -
27088- atomic_read(&disk->sync_io);
27089+ atomic_read_unchecked(&disk->sync_io);
27090 /* sync IO will cause sync_io to increase before the disk_stats
27091 * as sync_io is counted when a request starts, and
27092 * disk_stats is counted when it completes.
27093diff -urNp linux-3.0.4/drivers/md/md.h linux-3.0.4/drivers/md/md.h
27094--- linux-3.0.4/drivers/md/md.h 2011-07-21 22:17:23.000000000 -0400
27095+++ linux-3.0.4/drivers/md/md.h 2011-08-23 21:47:55.000000000 -0400
27096@@ -97,13 +97,13 @@ struct mdk_rdev_s
27097 * only maintained for arrays that
27098 * support hot removal
27099 */
27100- atomic_t read_errors; /* number of consecutive read errors that
27101+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
27102 * we have tried to ignore.
27103 */
27104 struct timespec last_read_error; /* monotonic time since our
27105 * last read error
27106 */
27107- atomic_t corrected_errors; /* number of corrected read errors,
27108+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
27109 * for reporting to userspace and storing
27110 * in superblock.
27111 */
27112@@ -344,7 +344,7 @@ static inline void rdev_dec_pending(mdk_
27113
27114 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
27115 {
27116- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27117+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27118 }
27119
27120 struct mdk_personality
27121diff -urNp linux-3.0.4/drivers/md/raid10.c linux-3.0.4/drivers/md/raid10.c
27122--- linux-3.0.4/drivers/md/raid10.c 2011-07-21 22:17:23.000000000 -0400
27123+++ linux-3.0.4/drivers/md/raid10.c 2011-08-23 21:47:55.000000000 -0400
27124@@ -1186,7 +1186,7 @@ static void end_sync_read(struct bio *bi
27125 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
27126 set_bit(R10BIO_Uptodate, &r10_bio->state);
27127 else {
27128- atomic_add(r10_bio->sectors,
27129+ atomic_add_unchecked(r10_bio->sectors,
27130 &conf->mirrors[d].rdev->corrected_errors);
27131 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
27132 md_error(r10_bio->mddev,
27133@@ -1394,7 +1394,7 @@ static void check_decay_read_errors(mdde
27134 {
27135 struct timespec cur_time_mon;
27136 unsigned long hours_since_last;
27137- unsigned int read_errors = atomic_read(&rdev->read_errors);
27138+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
27139
27140 ktime_get_ts(&cur_time_mon);
27141
27142@@ -1416,9 +1416,9 @@ static void check_decay_read_errors(mdde
27143 * overflowing the shift of read_errors by hours_since_last.
27144 */
27145 if (hours_since_last >= 8 * sizeof(read_errors))
27146- atomic_set(&rdev->read_errors, 0);
27147+ atomic_set_unchecked(&rdev->read_errors, 0);
27148 else
27149- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
27150+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
27151 }
27152
27153 /*
27154@@ -1448,8 +1448,8 @@ static void fix_read_error(conf_t *conf,
27155 return;
27156
27157 check_decay_read_errors(mddev, rdev);
27158- atomic_inc(&rdev->read_errors);
27159- if (atomic_read(&rdev->read_errors) > max_read_errors) {
27160+ atomic_inc_unchecked(&rdev->read_errors);
27161+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
27162 char b[BDEVNAME_SIZE];
27163 bdevname(rdev->bdev, b);
27164
27165@@ -1457,7 +1457,7 @@ static void fix_read_error(conf_t *conf,
27166 "md/raid10:%s: %s: Raid device exceeded "
27167 "read_error threshold [cur %d:max %d]\n",
27168 mdname(mddev), b,
27169- atomic_read(&rdev->read_errors), max_read_errors);
27170+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
27171 printk(KERN_NOTICE
27172 "md/raid10:%s: %s: Failing raid device\n",
27173 mdname(mddev), b);
27174@@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
27175 test_bit(In_sync, &rdev->flags)) {
27176 atomic_inc(&rdev->nr_pending);
27177 rcu_read_unlock();
27178- atomic_add(s, &rdev->corrected_errors);
27179+ atomic_add_unchecked(s, &rdev->corrected_errors);
27180 if (sync_page_io(rdev,
27181 r10_bio->devs[sl].addr +
27182 sect,
27183diff -urNp linux-3.0.4/drivers/md/raid1.c linux-3.0.4/drivers/md/raid1.c
27184--- linux-3.0.4/drivers/md/raid1.c 2011-07-21 22:17:23.000000000 -0400
27185+++ linux-3.0.4/drivers/md/raid1.c 2011-08-23 21:47:55.000000000 -0400
27186@@ -1263,7 +1263,7 @@ static int fix_sync_read_error(r1bio_t *
27187 rdev_dec_pending(rdev, mddev);
27188 md_error(mddev, rdev);
27189 } else
27190- atomic_add(s, &rdev->corrected_errors);
27191+ atomic_add_unchecked(s, &rdev->corrected_errors);
27192 }
27193 d = start;
27194 while (d != r1_bio->read_disk) {
27195@@ -1492,7 +1492,7 @@ static void fix_read_error(conf_t *conf,
27196 /* Well, this device is dead */
27197 md_error(mddev, rdev);
27198 else {
27199- atomic_add(s, &rdev->corrected_errors);
27200+ atomic_add_unchecked(s, &rdev->corrected_errors);
27201 printk(KERN_INFO
27202 "md/raid1:%s: read error corrected "
27203 "(%d sectors at %llu on %s)\n",
27204diff -urNp linux-3.0.4/drivers/md/raid5.c linux-3.0.4/drivers/md/raid5.c
27205--- linux-3.0.4/drivers/md/raid5.c 2011-07-21 22:17:23.000000000 -0400
27206+++ linux-3.0.4/drivers/md/raid5.c 2011-08-23 21:48:14.000000000 -0400
27207@@ -550,7 +550,7 @@ static void ops_run_io(struct stripe_hea
27208 bi->bi_next = NULL;
27209 if ((rw & WRITE) &&
27210 test_bit(R5_ReWrite, &sh->dev[i].flags))
27211- atomic_add(STRIPE_SECTORS,
27212+ atomic_add_unchecked(STRIPE_SECTORS,
27213 &rdev->corrected_errors);
27214 generic_make_request(bi);
27215 } else {
27216@@ -1596,15 +1596,15 @@ static void raid5_end_read_request(struc
27217 clear_bit(R5_ReadError, &sh->dev[i].flags);
27218 clear_bit(R5_ReWrite, &sh->dev[i].flags);
27219 }
27220- if (atomic_read(&conf->disks[i].rdev->read_errors))
27221- atomic_set(&conf->disks[i].rdev->read_errors, 0);
27222+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
27223+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
27224 } else {
27225 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
27226 int retry = 0;
27227 rdev = conf->disks[i].rdev;
27228
27229 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
27230- atomic_inc(&rdev->read_errors);
27231+ atomic_inc_unchecked(&rdev->read_errors);
27232 if (conf->mddev->degraded >= conf->max_degraded)
27233 printk_rl(KERN_WARNING
27234 "md/raid:%s: read error not correctable "
27235@@ -1622,7 +1622,7 @@ static void raid5_end_read_request(struc
27236 (unsigned long long)(sh->sector
27237 + rdev->data_offset),
27238 bdn);
27239- else if (atomic_read(&rdev->read_errors)
27240+ else if (atomic_read_unchecked(&rdev->read_errors)
27241 > conf->max_nr_stripes)
27242 printk(KERN_WARNING
27243 "md/raid:%s: Too many read errors, failing device %s.\n",
27244@@ -1945,6 +1945,7 @@ static sector_t compute_blocknr(struct s
27245 sector_t r_sector;
27246 struct stripe_head sh2;
27247
27248+ pax_track_stack();
27249
27250 chunk_offset = sector_div(new_sector, sectors_per_chunk);
27251 stripe = new_sector;
27252diff -urNp linux-3.0.4/drivers/media/common/saa7146_hlp.c linux-3.0.4/drivers/media/common/saa7146_hlp.c
27253--- linux-3.0.4/drivers/media/common/saa7146_hlp.c 2011-07-21 22:17:23.000000000 -0400
27254+++ linux-3.0.4/drivers/media/common/saa7146_hlp.c 2011-08-23 21:48:14.000000000 -0400
27255@@ -353,6 +353,8 @@ static void calculate_clipping_registers
27256
27257 int x[32], y[32], w[32], h[32];
27258
27259+ pax_track_stack();
27260+
27261 /* clear out memory */
27262 memset(&line_list[0], 0x00, sizeof(u32)*32);
27263 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
27264diff -urNp linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
27265--- linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-07-21 22:17:23.000000000 -0400
27266+++ linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-08-23 21:48:14.000000000 -0400
27267@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
27268 u8 buf[HOST_LINK_BUF_SIZE];
27269 int i;
27270
27271+ pax_track_stack();
27272+
27273 dprintk("%s\n", __func__);
27274
27275 /* check if we have space for a link buf in the rx_buffer */
27276@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
27277 unsigned long timeout;
27278 int written;
27279
27280+ pax_track_stack();
27281+
27282 dprintk("%s\n", __func__);
27283
27284 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
27285diff -urNp linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h
27286--- linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h 2011-07-21 22:17:23.000000000 -0400
27287+++ linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-24 18:24:40.000000000 -0400
27288@@ -68,12 +68,12 @@ struct dvb_demux_feed {
27289 union {
27290 struct dmx_ts_feed ts;
27291 struct dmx_section_feed sec;
27292- } feed;
27293+ } __no_const feed;
27294
27295 union {
27296 dmx_ts_cb ts;
27297 dmx_section_cb sec;
27298- } cb;
27299+ } __no_const cb;
27300
27301 struct dvb_demux *demux;
27302 void *priv;
27303diff -urNp linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c
27304--- linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c 2011-07-21 22:17:23.000000000 -0400
27305+++ linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-24 18:24:19.000000000 -0400
27306@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapt
27307 const struct dvb_device *template, void *priv, int type)
27308 {
27309 struct dvb_device *dvbdev;
27310- struct file_operations *dvbdevfops;
27311+ file_operations_no_const *dvbdevfops;
27312 struct device *clsdev;
27313 int minor;
27314 int id;
27315diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c
27316--- linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c 2011-07-21 22:17:23.000000000 -0400
27317+++ linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-24 18:26:33.000000000 -0400
27318@@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_di
27319 struct dib0700_adapter_state {
27320 int (*set_param_save) (struct dvb_frontend *,
27321 struct dvb_frontend_parameters *);
27322-};
27323+} __no_const;
27324
27325 static int dib7070_set_param_override(struct dvb_frontend *fe,
27326 struct dvb_frontend_parameters *fep)
27327diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c
27328--- linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-07-21 22:17:23.000000000 -0400
27329+++ linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-08-23 21:48:14.000000000 -0400
27330@@ -434,6 +434,8 @@ int dib0700_download_firmware(struct usb
27331 if (!buf)
27332 return -ENOMEM;
27333
27334+ pax_track_stack();
27335+
27336 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
27337 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
27338 hx.addr, hx.len, hx.chk);
27339diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h
27340--- linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h 2011-07-21 22:17:23.000000000 -0400
27341+++ linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h 2011-08-24 18:27:27.000000000 -0400
27342@@ -97,7 +97,7 @@
27343 #define DIBUSB_IOCTL_CMD_DISABLE_STREAM 0x02
27344
27345 struct dibusb_state {
27346- struct dib_fe_xfer_ops ops;
27347+ dib_fe_xfer_ops_no_const ops;
27348 int mt2060_present;
27349 u8 tuner_addr;
27350 };
27351diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c
27352--- linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c 2011-07-21 22:17:23.000000000 -0400
27353+++ linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c 2011-08-24 18:27:45.000000000 -0400
27354@@ -95,7 +95,7 @@ struct su3000_state {
27355
27356 struct s6x0_state {
27357 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
27358-};
27359+} __no_const;
27360
27361 /* debug */
27362 static int dvb_usb_dw2102_debug;
27363diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c
27364--- linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c 2011-07-21 22:17:23.000000000 -0400
27365+++ linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c 2011-08-23 21:48:14.000000000 -0400
27366@@ -742,6 +742,7 @@ static int lme2510_download_firmware(str
27367 usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
27368 0x06, 0x80, 0x0200, 0x00, data, 0x0109, 1000);
27369
27370+ pax_track_stack();
27371
27372 data[0] = 0x8a;
27373 len_in = 1;
27374@@ -764,6 +765,8 @@ static void lme_coldreset(struct usb_dev
27375 int ret = 0, len_in;
27376 u8 data[512] = {0};
27377
27378+ pax_track_stack();
27379+
27380 data[0] = 0x0a;
27381 len_in = 1;
27382 info("FRM Firmware Cold Reset");
27383diff -urNp linux-3.0.4/drivers/media/dvb/frontends/dib3000.h linux-3.0.4/drivers/media/dvb/frontends/dib3000.h
27384--- linux-3.0.4/drivers/media/dvb/frontends/dib3000.h 2011-07-21 22:17:23.000000000 -0400
27385+++ linux-3.0.4/drivers/media/dvb/frontends/dib3000.h 2011-08-24 18:28:18.000000000 -0400
27386@@ -40,10 +40,11 @@ struct dib_fe_xfer_ops
27387 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
27388 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
27389 };
27390+typedef struct dib_fe_xfer_ops __no_const dib_fe_xfer_ops_no_const;
27391
27392 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
27393 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27394- struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops);
27395+ struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops);
27396 #else
27397 static inline struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27398 struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
27399diff -urNp linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c
27400--- linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c 2011-07-21 22:17:23.000000000 -0400
27401+++ linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c 2011-08-24 18:28:42.000000000 -0400
27402@@ -756,7 +756,7 @@ static int dib3000mb_tuner_pass_ctrl(str
27403 static struct dvb_frontend_ops dib3000mb_ops;
27404
27405 struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27406- struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
27407+ struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops)
27408 {
27409 struct dib3000_state* state = NULL;
27410
27411diff -urNp linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c
27412--- linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c 2011-07-21 22:17:23.000000000 -0400
27413+++ linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c 2011-08-23 21:48:14.000000000 -0400
27414@@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16
27415 int ret = -1;
27416 int sync;
27417
27418+ pax_track_stack();
27419+
27420 dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
27421
27422 fcp = 3000;
27423diff -urNp linux-3.0.4/drivers/media/dvb/frontends/or51211.c linux-3.0.4/drivers/media/dvb/frontends/or51211.c
27424--- linux-3.0.4/drivers/media/dvb/frontends/or51211.c 2011-07-21 22:17:23.000000000 -0400
27425+++ linux-3.0.4/drivers/media/dvb/frontends/or51211.c 2011-08-23 21:48:14.000000000 -0400
27426@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
27427 u8 tudata[585];
27428 int i;
27429
27430+ pax_track_stack();
27431+
27432 dprintk("Firmware is %zd bytes\n",fw->size);
27433
27434 /* Get eprom data */
27435diff -urNp linux-3.0.4/drivers/media/video/cx18/cx18-driver.c linux-3.0.4/drivers/media/video/cx18/cx18-driver.c
27436--- linux-3.0.4/drivers/media/video/cx18/cx18-driver.c 2011-07-21 22:17:23.000000000 -0400
27437+++ linux-3.0.4/drivers/media/video/cx18/cx18-driver.c 2011-08-23 21:48:14.000000000 -0400
27438@@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
27439 struct i2c_client c;
27440 u8 eedata[256];
27441
27442+ pax_track_stack();
27443+
27444 memset(&c, 0, sizeof(c));
27445 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
27446 c.adapter = &cx->i2c_adap[0];
27447diff -urNp linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c
27448--- linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c 2011-07-21 22:17:23.000000000 -0400
27449+++ linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c 2011-08-23 21:48:14.000000000 -0400
27450@@ -53,6 +53,8 @@ static void cx23885_input_process_measur
27451 bool handle = false;
27452 struct ir_raw_event ir_core_event[64];
27453
27454+ pax_track_stack();
27455+
27456 do {
27457 num = 0;
27458 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
27459diff -urNp linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
27460--- linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-07-21 22:17:23.000000000 -0400
27461+++ linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-08-23 21:48:14.000000000 -0400
27462@@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
27463 u8 *eeprom;
27464 struct tveeprom tvdata;
27465
27466+ pax_track_stack();
27467+
27468 memset(&tvdata,0,sizeof(tvdata));
27469
27470 eeprom = pvr2_eeprom_fetch(hdw);
27471diff -urNp linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c
27472--- linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c 2011-07-21 22:17:23.000000000 -0400
27473+++ linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c 2011-08-23 21:48:14.000000000 -0400
27474@@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_su
27475 unsigned char localPAT[256];
27476 unsigned char localPMT[256];
27477
27478+ pax_track_stack();
27479+
27480 /* Set video format - must be done first as it resets other settings */
27481 set_reg8(client, 0x41, h->video_format);
27482
27483diff -urNp linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c
27484--- linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c 2011-07-21 22:17:23.000000000 -0400
27485+++ linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c 2011-08-23 21:48:14.000000000 -0400
27486@@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_d
27487 u8 tmp[512];
27488 dprintk(DBGLVL_CMD, "%s()\n", __func__);
27489
27490+ pax_track_stack();
27491+
27492 /* While any outstand message on the bus exists... */
27493 do {
27494
27495@@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
27496 u8 tmp[512];
27497 dprintk(DBGLVL_CMD, "%s()\n", __func__);
27498
27499+ pax_track_stack();
27500+
27501 while (loop) {
27502
27503 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
27504diff -urNp linux-3.0.4/drivers/media/video/timblogiw.c linux-3.0.4/drivers/media/video/timblogiw.c
27505--- linux-3.0.4/drivers/media/video/timblogiw.c 2011-07-21 22:17:23.000000000 -0400
27506+++ linux-3.0.4/drivers/media/video/timblogiw.c 2011-08-24 18:29:20.000000000 -0400
27507@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *f
27508
27509 /* Platform device functions */
27510
27511-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
27512+static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
27513 .vidioc_querycap = timblogiw_querycap,
27514 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
27515 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
27516diff -urNp linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c
27517--- linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c 2011-07-21 22:17:23.000000000 -0400
27518+++ linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c 2011-08-23 21:48:14.000000000 -0400
27519@@ -707,6 +707,8 @@ static enum parse_state usbvision_parse_
27520 unsigned char rv, gv, bv;
27521 static unsigned char *Y, *U, *V;
27522
27523+ pax_track_stack();
27524+
27525 frame = usbvision->cur_frame;
27526 image_size = frame->frmwidth * frame->frmheight;
27527 if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
27528diff -urNp linux-3.0.4/drivers/media/video/videobuf-dma-sg.c linux-3.0.4/drivers/media/video/videobuf-dma-sg.c
27529--- linux-3.0.4/drivers/media/video/videobuf-dma-sg.c 2011-07-21 22:17:23.000000000 -0400
27530+++ linux-3.0.4/drivers/media/video/videobuf-dma-sg.c 2011-08-23 21:48:14.000000000 -0400
27531@@ -606,6 +606,8 @@ void *videobuf_sg_alloc(size_t size)
27532 {
27533 struct videobuf_queue q;
27534
27535+ pax_track_stack();
27536+
27537 /* Required to make generic handler to call __videobuf_alloc */
27538 q.int_ops = &sg_ops;
27539
27540diff -urNp linux-3.0.4/drivers/message/fusion/mptbase.c linux-3.0.4/drivers/message/fusion/mptbase.c
27541--- linux-3.0.4/drivers/message/fusion/mptbase.c 2011-07-21 22:17:23.000000000 -0400
27542+++ linux-3.0.4/drivers/message/fusion/mptbase.c 2011-08-23 21:48:14.000000000 -0400
27543@@ -6681,8 +6681,13 @@ static int mpt_iocinfo_proc_show(struct
27544 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
27545 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
27546
27547+#ifdef CONFIG_GRKERNSEC_HIDESYM
27548+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
27549+#else
27550 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
27551 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
27552+#endif
27553+
27554 /*
27555 * Rounding UP to nearest 4-kB boundary here...
27556 */
27557diff -urNp linux-3.0.4/drivers/message/fusion/mptsas.c linux-3.0.4/drivers/message/fusion/mptsas.c
27558--- linux-3.0.4/drivers/message/fusion/mptsas.c 2011-07-21 22:17:23.000000000 -0400
27559+++ linux-3.0.4/drivers/message/fusion/mptsas.c 2011-08-23 21:47:55.000000000 -0400
27560@@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devin
27561 return 0;
27562 }
27563
27564+static inline void
27565+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
27566+{
27567+ if (phy_info->port_details) {
27568+ phy_info->port_details->rphy = rphy;
27569+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
27570+ ioc->name, rphy));
27571+ }
27572+
27573+ if (rphy) {
27574+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
27575+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
27576+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
27577+ ioc->name, rphy, rphy->dev.release));
27578+ }
27579+}
27580+
27581 /* no mutex */
27582 static void
27583 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
27584@@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
27585 return NULL;
27586 }
27587
27588-static inline void
27589-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
27590-{
27591- if (phy_info->port_details) {
27592- phy_info->port_details->rphy = rphy;
27593- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
27594- ioc->name, rphy));
27595- }
27596-
27597- if (rphy) {
27598- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
27599- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
27600- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
27601- ioc->name, rphy, rphy->dev.release));
27602- }
27603-}
27604-
27605 static inline struct sas_port *
27606 mptsas_get_port(struct mptsas_phyinfo *phy_info)
27607 {
27608diff -urNp linux-3.0.4/drivers/message/fusion/mptscsih.c linux-3.0.4/drivers/message/fusion/mptscsih.c
27609--- linux-3.0.4/drivers/message/fusion/mptscsih.c 2011-07-21 22:17:23.000000000 -0400
27610+++ linux-3.0.4/drivers/message/fusion/mptscsih.c 2011-08-23 21:47:55.000000000 -0400
27611@@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
27612
27613 h = shost_priv(SChost);
27614
27615- if (h) {
27616- if (h->info_kbuf == NULL)
27617- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
27618- return h->info_kbuf;
27619- h->info_kbuf[0] = '\0';
27620+ if (!h)
27621+ return NULL;
27622
27623- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
27624- h->info_kbuf[size-1] = '\0';
27625- }
27626+ if (h->info_kbuf == NULL)
27627+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
27628+ return h->info_kbuf;
27629+ h->info_kbuf[0] = '\0';
27630+
27631+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
27632+ h->info_kbuf[size-1] = '\0';
27633
27634 return h->info_kbuf;
27635 }
27636diff -urNp linux-3.0.4/drivers/message/i2o/i2o_config.c linux-3.0.4/drivers/message/i2o/i2o_config.c
27637--- linux-3.0.4/drivers/message/i2o/i2o_config.c 2011-07-21 22:17:23.000000000 -0400
27638+++ linux-3.0.4/drivers/message/i2o/i2o_config.c 2011-08-23 21:48:14.000000000 -0400
27639@@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned lon
27640 struct i2o_message *msg;
27641 unsigned int iop;
27642
27643+ pax_track_stack();
27644+
27645 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
27646 return -EFAULT;
27647
27648diff -urNp linux-3.0.4/drivers/message/i2o/i2o_proc.c linux-3.0.4/drivers/message/i2o/i2o_proc.c
27649--- linux-3.0.4/drivers/message/i2o/i2o_proc.c 2011-07-21 22:17:23.000000000 -0400
27650+++ linux-3.0.4/drivers/message/i2o/i2o_proc.c 2011-08-23 21:47:55.000000000 -0400
27651@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
27652 "Array Controller Device"
27653 };
27654
27655-static char *chtostr(u8 * chars, int n)
27656-{
27657- char tmp[256];
27658- tmp[0] = 0;
27659- return strncat(tmp, (char *)chars, n);
27660-}
27661-
27662 static int i2o_report_query_status(struct seq_file *seq, int block_status,
27663 char *group)
27664 {
27665@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
27666
27667 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
27668 seq_printf(seq, "%-#8x", ddm_table.module_id);
27669- seq_printf(seq, "%-29s",
27670- chtostr(ddm_table.module_name_version, 28));
27671+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
27672 seq_printf(seq, "%9d ", ddm_table.data_size);
27673 seq_printf(seq, "%8d", ddm_table.code_size);
27674
27675@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
27676
27677 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
27678 seq_printf(seq, "%-#8x", dst->module_id);
27679- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
27680- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
27681+ seq_printf(seq, "%-.28s", dst->module_name_version);
27682+ seq_printf(seq, "%-.8s", dst->date);
27683 seq_printf(seq, "%8d ", dst->module_size);
27684 seq_printf(seq, "%8d ", dst->mpb_size);
27685 seq_printf(seq, "0x%04x", dst->module_flags);
27686@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
27687 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
27688 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
27689 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
27690- seq_printf(seq, "Vendor info : %s\n",
27691- chtostr((u8 *) (work32 + 2), 16));
27692- seq_printf(seq, "Product info : %s\n",
27693- chtostr((u8 *) (work32 + 6), 16));
27694- seq_printf(seq, "Description : %s\n",
27695- chtostr((u8 *) (work32 + 10), 16));
27696- seq_printf(seq, "Product rev. : %s\n",
27697- chtostr((u8 *) (work32 + 14), 8));
27698+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
27699+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
27700+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
27701+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
27702
27703 seq_printf(seq, "Serial number : ");
27704 print_serial_number(seq, (u8 *) (work32 + 16),
27705@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
27706 }
27707
27708 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
27709- seq_printf(seq, "Module name : %s\n",
27710- chtostr(result.module_name, 24));
27711- seq_printf(seq, "Module revision : %s\n",
27712- chtostr(result.module_rev, 8));
27713+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
27714+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
27715
27716 seq_printf(seq, "Serial number : ");
27717 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
27718@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
27719 return 0;
27720 }
27721
27722- seq_printf(seq, "Device name : %s\n",
27723- chtostr(result.device_name, 64));
27724- seq_printf(seq, "Service name : %s\n",
27725- chtostr(result.service_name, 64));
27726- seq_printf(seq, "Physical name : %s\n",
27727- chtostr(result.physical_location, 64));
27728- seq_printf(seq, "Instance number : %s\n",
27729- chtostr(result.instance_number, 4));
27730+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
27731+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
27732+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
27733+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
27734
27735 return 0;
27736 }
27737diff -urNp linux-3.0.4/drivers/message/i2o/iop.c linux-3.0.4/drivers/message/i2o/iop.c
27738--- linux-3.0.4/drivers/message/i2o/iop.c 2011-07-21 22:17:23.000000000 -0400
27739+++ linux-3.0.4/drivers/message/i2o/iop.c 2011-08-23 21:47:55.000000000 -0400
27740@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
27741
27742 spin_lock_irqsave(&c->context_list_lock, flags);
27743
27744- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
27745- atomic_inc(&c->context_list_counter);
27746+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
27747+ atomic_inc_unchecked(&c->context_list_counter);
27748
27749- entry->context = atomic_read(&c->context_list_counter);
27750+ entry->context = atomic_read_unchecked(&c->context_list_counter);
27751
27752 list_add(&entry->list, &c->context_list);
27753
27754@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(voi
27755
27756 #if BITS_PER_LONG == 64
27757 spin_lock_init(&c->context_list_lock);
27758- atomic_set(&c->context_list_counter, 0);
27759+ atomic_set_unchecked(&c->context_list_counter, 0);
27760 INIT_LIST_HEAD(&c->context_list);
27761 #endif
27762
27763diff -urNp linux-3.0.4/drivers/mfd/abx500-core.c linux-3.0.4/drivers/mfd/abx500-core.c
27764--- linux-3.0.4/drivers/mfd/abx500-core.c 2011-07-21 22:17:23.000000000 -0400
27765+++ linux-3.0.4/drivers/mfd/abx500-core.c 2011-08-23 21:47:55.000000000 -0400
27766@@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
27767
27768 struct abx500_device_entry {
27769 struct list_head list;
27770- struct abx500_ops ops;
27771+ abx500_ops_no_const ops;
27772 struct device *dev;
27773 };
27774
27775diff -urNp linux-3.0.4/drivers/mfd/janz-cmodio.c linux-3.0.4/drivers/mfd/janz-cmodio.c
27776--- linux-3.0.4/drivers/mfd/janz-cmodio.c 2011-07-21 22:17:23.000000000 -0400
27777+++ linux-3.0.4/drivers/mfd/janz-cmodio.c 2011-08-23 21:47:55.000000000 -0400
27778@@ -13,6 +13,7 @@
27779
27780 #include <linux/kernel.h>
27781 #include <linux/module.h>
27782+#include <linux/slab.h>
27783 #include <linux/init.h>
27784 #include <linux/pci.h>
27785 #include <linux/interrupt.h>
27786diff -urNp linux-3.0.4/drivers/mfd/wm8350-i2c.c linux-3.0.4/drivers/mfd/wm8350-i2c.c
27787--- linux-3.0.4/drivers/mfd/wm8350-i2c.c 2011-07-21 22:17:23.000000000 -0400
27788+++ linux-3.0.4/drivers/mfd/wm8350-i2c.c 2011-08-23 21:48:14.000000000 -0400
27789@@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struc
27790 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
27791 int ret;
27792
27793+ pax_track_stack();
27794+
27795 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
27796 return -EINVAL;
27797
27798diff -urNp linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c
27799--- linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c 2011-07-21 22:17:23.000000000 -0400
27800+++ linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c 2011-08-23 21:47:55.000000000 -0400
27801@@ -435,7 +435,7 @@ static irqreturn_t lis302dl_interrupt(in
27802 * the lid is closed. This leads to interrupts as soon as a little move
27803 * is done.
27804 */
27805- atomic_inc(&lis3_dev.count);
27806+ atomic_inc_unchecked(&lis3_dev.count);
27807
27808 wake_up_interruptible(&lis3_dev.misc_wait);
27809 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
27810@@ -518,7 +518,7 @@ static int lis3lv02d_misc_open(struct in
27811 if (lis3_dev.pm_dev)
27812 pm_runtime_get_sync(lis3_dev.pm_dev);
27813
27814- atomic_set(&lis3_dev.count, 0);
27815+ atomic_set_unchecked(&lis3_dev.count, 0);
27816 return 0;
27817 }
27818
27819@@ -545,7 +545,7 @@ static ssize_t lis3lv02d_misc_read(struc
27820 add_wait_queue(&lis3_dev.misc_wait, &wait);
27821 while (true) {
27822 set_current_state(TASK_INTERRUPTIBLE);
27823- data = atomic_xchg(&lis3_dev.count, 0);
27824+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
27825 if (data)
27826 break;
27827
27828@@ -583,7 +583,7 @@ out:
27829 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
27830 {
27831 poll_wait(file, &lis3_dev.misc_wait, wait);
27832- if (atomic_read(&lis3_dev.count))
27833+ if (atomic_read_unchecked(&lis3_dev.count))
27834 return POLLIN | POLLRDNORM;
27835 return 0;
27836 }
27837diff -urNp linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h
27838--- linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h 2011-07-21 22:17:23.000000000 -0400
27839+++ linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h 2011-08-23 21:47:55.000000000 -0400
27840@@ -265,7 +265,7 @@ struct lis3lv02d {
27841 struct input_polled_dev *idev; /* input device */
27842 struct platform_device *pdev; /* platform device */
27843 struct regulator_bulk_data regulators[2];
27844- atomic_t count; /* interrupt count after last read */
27845+ atomic_unchecked_t count; /* interrupt count after last read */
27846 union axis_conversion ac; /* hw -> logical axis */
27847 int mapped_btns[3];
27848
27849diff -urNp linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c
27850--- linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c 2011-07-21 22:17:23.000000000 -0400
27851+++ linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c 2011-08-23 21:47:55.000000000 -0400
27852@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
27853 unsigned long nsec;
27854
27855 nsec = CLKS2NSEC(clks);
27856- atomic_long_inc(&mcs_op_statistics[op].count);
27857- atomic_long_add(nsec, &mcs_op_statistics[op].total);
27858+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
27859+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
27860 if (mcs_op_statistics[op].max < nsec)
27861 mcs_op_statistics[op].max = nsec;
27862 }
27863diff -urNp linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c
27864--- linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c 2011-07-21 22:17:23.000000000 -0400
27865+++ linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c 2011-08-23 21:47:55.000000000 -0400
27866@@ -32,9 +32,9 @@
27867
27868 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
27869
27870-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
27871+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
27872 {
27873- unsigned long val = atomic_long_read(v);
27874+ unsigned long val = atomic_long_read_unchecked(v);
27875
27876 seq_printf(s, "%16lu %s\n", val, id);
27877 }
27878@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
27879
27880 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
27881 for (op = 0; op < mcsop_last; op++) {
27882- count = atomic_long_read(&mcs_op_statistics[op].count);
27883- total = atomic_long_read(&mcs_op_statistics[op].total);
27884+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
27885+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
27886 max = mcs_op_statistics[op].max;
27887 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
27888 count ? total / count : 0, max);
27889diff -urNp linux-3.0.4/drivers/misc/sgi-gru/grutables.h linux-3.0.4/drivers/misc/sgi-gru/grutables.h
27890--- linux-3.0.4/drivers/misc/sgi-gru/grutables.h 2011-07-21 22:17:23.000000000 -0400
27891+++ linux-3.0.4/drivers/misc/sgi-gru/grutables.h 2011-08-23 21:47:55.000000000 -0400
27892@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
27893 * GRU statistics.
27894 */
27895 struct gru_stats_s {
27896- atomic_long_t vdata_alloc;
27897- atomic_long_t vdata_free;
27898- atomic_long_t gts_alloc;
27899- atomic_long_t gts_free;
27900- atomic_long_t gms_alloc;
27901- atomic_long_t gms_free;
27902- atomic_long_t gts_double_allocate;
27903- atomic_long_t assign_context;
27904- atomic_long_t assign_context_failed;
27905- atomic_long_t free_context;
27906- atomic_long_t load_user_context;
27907- atomic_long_t load_kernel_context;
27908- atomic_long_t lock_kernel_context;
27909- atomic_long_t unlock_kernel_context;
27910- atomic_long_t steal_user_context;
27911- atomic_long_t steal_kernel_context;
27912- atomic_long_t steal_context_failed;
27913- atomic_long_t nopfn;
27914- atomic_long_t asid_new;
27915- atomic_long_t asid_next;
27916- atomic_long_t asid_wrap;
27917- atomic_long_t asid_reuse;
27918- atomic_long_t intr;
27919- atomic_long_t intr_cbr;
27920- atomic_long_t intr_tfh;
27921- atomic_long_t intr_spurious;
27922- atomic_long_t intr_mm_lock_failed;
27923- atomic_long_t call_os;
27924- atomic_long_t call_os_wait_queue;
27925- atomic_long_t user_flush_tlb;
27926- atomic_long_t user_unload_context;
27927- atomic_long_t user_exception;
27928- atomic_long_t set_context_option;
27929- atomic_long_t check_context_retarget_intr;
27930- atomic_long_t check_context_unload;
27931- atomic_long_t tlb_dropin;
27932- atomic_long_t tlb_preload_page;
27933- atomic_long_t tlb_dropin_fail_no_asid;
27934- atomic_long_t tlb_dropin_fail_upm;
27935- atomic_long_t tlb_dropin_fail_invalid;
27936- atomic_long_t tlb_dropin_fail_range_active;
27937- atomic_long_t tlb_dropin_fail_idle;
27938- atomic_long_t tlb_dropin_fail_fmm;
27939- atomic_long_t tlb_dropin_fail_no_exception;
27940- atomic_long_t tfh_stale_on_fault;
27941- atomic_long_t mmu_invalidate_range;
27942- atomic_long_t mmu_invalidate_page;
27943- atomic_long_t flush_tlb;
27944- atomic_long_t flush_tlb_gru;
27945- atomic_long_t flush_tlb_gru_tgh;
27946- atomic_long_t flush_tlb_gru_zero_asid;
27947-
27948- atomic_long_t copy_gpa;
27949- atomic_long_t read_gpa;
27950-
27951- atomic_long_t mesq_receive;
27952- atomic_long_t mesq_receive_none;
27953- atomic_long_t mesq_send;
27954- atomic_long_t mesq_send_failed;
27955- atomic_long_t mesq_noop;
27956- atomic_long_t mesq_send_unexpected_error;
27957- atomic_long_t mesq_send_lb_overflow;
27958- atomic_long_t mesq_send_qlimit_reached;
27959- atomic_long_t mesq_send_amo_nacked;
27960- atomic_long_t mesq_send_put_nacked;
27961- atomic_long_t mesq_page_overflow;
27962- atomic_long_t mesq_qf_locked;
27963- atomic_long_t mesq_qf_noop_not_full;
27964- atomic_long_t mesq_qf_switch_head_failed;
27965- atomic_long_t mesq_qf_unexpected_error;
27966- atomic_long_t mesq_noop_unexpected_error;
27967- atomic_long_t mesq_noop_lb_overflow;
27968- atomic_long_t mesq_noop_qlimit_reached;
27969- atomic_long_t mesq_noop_amo_nacked;
27970- atomic_long_t mesq_noop_put_nacked;
27971- atomic_long_t mesq_noop_page_overflow;
27972+ atomic_long_unchecked_t vdata_alloc;
27973+ atomic_long_unchecked_t vdata_free;
27974+ atomic_long_unchecked_t gts_alloc;
27975+ atomic_long_unchecked_t gts_free;
27976+ atomic_long_unchecked_t gms_alloc;
27977+ atomic_long_unchecked_t gms_free;
27978+ atomic_long_unchecked_t gts_double_allocate;
27979+ atomic_long_unchecked_t assign_context;
27980+ atomic_long_unchecked_t assign_context_failed;
27981+ atomic_long_unchecked_t free_context;
27982+ atomic_long_unchecked_t load_user_context;
27983+ atomic_long_unchecked_t load_kernel_context;
27984+ atomic_long_unchecked_t lock_kernel_context;
27985+ atomic_long_unchecked_t unlock_kernel_context;
27986+ atomic_long_unchecked_t steal_user_context;
27987+ atomic_long_unchecked_t steal_kernel_context;
27988+ atomic_long_unchecked_t steal_context_failed;
27989+ atomic_long_unchecked_t nopfn;
27990+ atomic_long_unchecked_t asid_new;
27991+ atomic_long_unchecked_t asid_next;
27992+ atomic_long_unchecked_t asid_wrap;
27993+ atomic_long_unchecked_t asid_reuse;
27994+ atomic_long_unchecked_t intr;
27995+ atomic_long_unchecked_t intr_cbr;
27996+ atomic_long_unchecked_t intr_tfh;
27997+ atomic_long_unchecked_t intr_spurious;
27998+ atomic_long_unchecked_t intr_mm_lock_failed;
27999+ atomic_long_unchecked_t call_os;
28000+ atomic_long_unchecked_t call_os_wait_queue;
28001+ atomic_long_unchecked_t user_flush_tlb;
28002+ atomic_long_unchecked_t user_unload_context;
28003+ atomic_long_unchecked_t user_exception;
28004+ atomic_long_unchecked_t set_context_option;
28005+ atomic_long_unchecked_t check_context_retarget_intr;
28006+ atomic_long_unchecked_t check_context_unload;
28007+ atomic_long_unchecked_t tlb_dropin;
28008+ atomic_long_unchecked_t tlb_preload_page;
28009+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
28010+ atomic_long_unchecked_t tlb_dropin_fail_upm;
28011+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
28012+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
28013+ atomic_long_unchecked_t tlb_dropin_fail_idle;
28014+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
28015+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
28016+ atomic_long_unchecked_t tfh_stale_on_fault;
28017+ atomic_long_unchecked_t mmu_invalidate_range;
28018+ atomic_long_unchecked_t mmu_invalidate_page;
28019+ atomic_long_unchecked_t flush_tlb;
28020+ atomic_long_unchecked_t flush_tlb_gru;
28021+ atomic_long_unchecked_t flush_tlb_gru_tgh;
28022+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
28023+
28024+ atomic_long_unchecked_t copy_gpa;
28025+ atomic_long_unchecked_t read_gpa;
28026+
28027+ atomic_long_unchecked_t mesq_receive;
28028+ atomic_long_unchecked_t mesq_receive_none;
28029+ atomic_long_unchecked_t mesq_send;
28030+ atomic_long_unchecked_t mesq_send_failed;
28031+ atomic_long_unchecked_t mesq_noop;
28032+ atomic_long_unchecked_t mesq_send_unexpected_error;
28033+ atomic_long_unchecked_t mesq_send_lb_overflow;
28034+ atomic_long_unchecked_t mesq_send_qlimit_reached;
28035+ atomic_long_unchecked_t mesq_send_amo_nacked;
28036+ atomic_long_unchecked_t mesq_send_put_nacked;
28037+ atomic_long_unchecked_t mesq_page_overflow;
28038+ atomic_long_unchecked_t mesq_qf_locked;
28039+ atomic_long_unchecked_t mesq_qf_noop_not_full;
28040+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
28041+ atomic_long_unchecked_t mesq_qf_unexpected_error;
28042+ atomic_long_unchecked_t mesq_noop_unexpected_error;
28043+ atomic_long_unchecked_t mesq_noop_lb_overflow;
28044+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
28045+ atomic_long_unchecked_t mesq_noop_amo_nacked;
28046+ atomic_long_unchecked_t mesq_noop_put_nacked;
28047+ atomic_long_unchecked_t mesq_noop_page_overflow;
28048
28049 };
28050
28051@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
28052 tghop_invalidate, mcsop_last};
28053
28054 struct mcs_op_statistic {
28055- atomic_long_t count;
28056- atomic_long_t total;
28057+ atomic_long_unchecked_t count;
28058+ atomic_long_unchecked_t total;
28059 unsigned long max;
28060 };
28061
28062@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
28063
28064 #define STAT(id) do { \
28065 if (gru_options & OPT_STATS) \
28066- atomic_long_inc(&gru_stats.id); \
28067+ atomic_long_inc_unchecked(&gru_stats.id); \
28068 } while (0)
28069
28070 #ifdef CONFIG_SGI_GRU_DEBUG
28071diff -urNp linux-3.0.4/drivers/misc/sgi-xp/xp.h linux-3.0.4/drivers/misc/sgi-xp/xp.h
28072--- linux-3.0.4/drivers/misc/sgi-xp/xp.h 2011-07-21 22:17:23.000000000 -0400
28073+++ linux-3.0.4/drivers/misc/sgi-xp/xp.h 2011-08-23 21:47:55.000000000 -0400
28074@@ -289,7 +289,7 @@ struct xpc_interface {
28075 xpc_notify_func, void *);
28076 void (*received) (short, int, void *);
28077 enum xp_retval (*partid_to_nasids) (short, void *);
28078-};
28079+} __no_const;
28080
28081 extern struct xpc_interface xpc_interface;
28082
28083diff -urNp linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c
28084--- linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c 2011-07-21 22:17:23.000000000 -0400
28085+++ linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c 2011-08-23 21:48:14.000000000 -0400
28086@@ -757,6 +757,8 @@ static int chip_ready (struct map_info *
28087 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
28088 unsigned long timeo = jiffies + HZ;
28089
28090+ pax_track_stack();
28091+
28092 /* Prevent setting state FL_SYNCING for chip in suspended state. */
28093 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
28094 goto sleep;
28095@@ -1653,6 +1655,8 @@ static int __xipram do_write_buffer(stru
28096 unsigned long initial_adr;
28097 int initial_len = len;
28098
28099+ pax_track_stack();
28100+
28101 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
28102 adr += chip->start;
28103 initial_adr = adr;
28104@@ -1871,6 +1875,8 @@ static int __xipram do_erase_oneblock(st
28105 int retries = 3;
28106 int ret;
28107
28108+ pax_track_stack();
28109+
28110 adr += chip->start;
28111
28112 retry:
28113diff -urNp linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c
28114--- linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c 2011-07-21 22:17:23.000000000 -0400
28115+++ linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c 2011-08-23 21:48:14.000000000 -0400
28116@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
28117 unsigned long cmd_addr;
28118 struct cfi_private *cfi = map->fldrv_priv;
28119
28120+ pax_track_stack();
28121+
28122 adr += chip->start;
28123
28124 /* Ensure cmd read/writes are aligned. */
28125@@ -429,6 +431,8 @@ static inline int do_write_buffer(struct
28126 DECLARE_WAITQUEUE(wait, current);
28127 int wbufsize, z;
28128
28129+ pax_track_stack();
28130+
28131 /* M58LW064A requires bus alignment for buffer wriets -- saw */
28132 if (adr & (map_bankwidth(map)-1))
28133 return -EINVAL;
28134@@ -743,6 +747,8 @@ static inline int do_erase_oneblock(stru
28135 DECLARE_WAITQUEUE(wait, current);
28136 int ret = 0;
28137
28138+ pax_track_stack();
28139+
28140 adr += chip->start;
28141
28142 /* Let's determine this according to the interleave only once */
28143@@ -1048,6 +1054,8 @@ static inline int do_lock_oneblock(struc
28144 unsigned long timeo = jiffies + HZ;
28145 DECLARE_WAITQUEUE(wait, current);
28146
28147+ pax_track_stack();
28148+
28149 adr += chip->start;
28150
28151 /* Let's determine this according to the interleave only once */
28152@@ -1197,6 +1205,8 @@ static inline int do_unlock_oneblock(str
28153 unsigned long timeo = jiffies + HZ;
28154 DECLARE_WAITQUEUE(wait, current);
28155
28156+ pax_track_stack();
28157+
28158 adr += chip->start;
28159
28160 /* Let's determine this according to the interleave only once */
28161diff -urNp linux-3.0.4/drivers/mtd/devices/doc2000.c linux-3.0.4/drivers/mtd/devices/doc2000.c
28162--- linux-3.0.4/drivers/mtd/devices/doc2000.c 2011-07-21 22:17:23.000000000 -0400
28163+++ linux-3.0.4/drivers/mtd/devices/doc2000.c 2011-08-23 21:47:55.000000000 -0400
28164@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
28165
28166 /* The ECC will not be calculated correctly if less than 512 is written */
28167 /* DBB-
28168- if (len != 0x200 && eccbuf)
28169+ if (len != 0x200)
28170 printk(KERN_WARNING
28171 "ECC needs a full sector write (adr: %lx size %lx)\n",
28172 (long) to, (long) len);
28173diff -urNp linux-3.0.4/drivers/mtd/devices/doc2001.c linux-3.0.4/drivers/mtd/devices/doc2001.c
28174--- linux-3.0.4/drivers/mtd/devices/doc2001.c 2011-07-21 22:17:23.000000000 -0400
28175+++ linux-3.0.4/drivers/mtd/devices/doc2001.c 2011-08-23 21:47:55.000000000 -0400
28176@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
28177 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
28178
28179 /* Don't allow read past end of device */
28180- if (from >= this->totlen)
28181+ if (from >= this->totlen || !len)
28182 return -EINVAL;
28183
28184 /* Don't allow a single read to cross a 512-byte block boundary */
28185diff -urNp linux-3.0.4/drivers/mtd/ftl.c linux-3.0.4/drivers/mtd/ftl.c
28186--- linux-3.0.4/drivers/mtd/ftl.c 2011-07-21 22:17:23.000000000 -0400
28187+++ linux-3.0.4/drivers/mtd/ftl.c 2011-08-23 21:48:14.000000000 -0400
28188@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
28189 loff_t offset;
28190 uint16_t srcunitswap = cpu_to_le16(srcunit);
28191
28192+ pax_track_stack();
28193+
28194 eun = &part->EUNInfo[srcunit];
28195 xfer = &part->XferInfo[xferunit];
28196 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
28197diff -urNp linux-3.0.4/drivers/mtd/inftlcore.c linux-3.0.4/drivers/mtd/inftlcore.c
28198--- linux-3.0.4/drivers/mtd/inftlcore.c 2011-07-21 22:17:23.000000000 -0400
28199+++ linux-3.0.4/drivers/mtd/inftlcore.c 2011-08-23 21:48:14.000000000 -0400
28200@@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLr
28201 struct inftl_oob oob;
28202 size_t retlen;
28203
28204+ pax_track_stack();
28205+
28206 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
28207 "pending=%d)\n", inftl, thisVUC, pendingblock);
28208
28209diff -urNp linux-3.0.4/drivers/mtd/inftlmount.c linux-3.0.4/drivers/mtd/inftlmount.c
28210--- linux-3.0.4/drivers/mtd/inftlmount.c 2011-07-21 22:17:23.000000000 -0400
28211+++ linux-3.0.4/drivers/mtd/inftlmount.c 2011-08-23 21:48:14.000000000 -0400
28212@@ -53,6 +53,8 @@ static int find_boot_record(struct INFTL
28213 struct INFTLPartition *ip;
28214 size_t retlen;
28215
28216+ pax_track_stack();
28217+
28218 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
28219
28220 /*
28221diff -urNp linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c
28222--- linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c 2011-07-21 22:17:23.000000000 -0400
28223+++ linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c 2011-08-23 21:48:14.000000000 -0400
28224@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
28225 {
28226 map_word pfow_val[4];
28227
28228+ pax_track_stack();
28229+
28230 /* Check identification string */
28231 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
28232 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
28233diff -urNp linux-3.0.4/drivers/mtd/mtdchar.c linux-3.0.4/drivers/mtd/mtdchar.c
28234--- linux-3.0.4/drivers/mtd/mtdchar.c 2011-07-21 22:17:23.000000000 -0400
28235+++ linux-3.0.4/drivers/mtd/mtdchar.c 2011-08-23 21:48:14.000000000 -0400
28236@@ -553,6 +553,8 @@ static int mtd_ioctl(struct file *file,
28237 u_long size;
28238 struct mtd_info_user info;
28239
28240+ pax_track_stack();
28241+
28242 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
28243
28244 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
28245diff -urNp linux-3.0.4/drivers/mtd/nand/denali.c linux-3.0.4/drivers/mtd/nand/denali.c
28246--- linux-3.0.4/drivers/mtd/nand/denali.c 2011-07-21 22:17:23.000000000 -0400
28247+++ linux-3.0.4/drivers/mtd/nand/denali.c 2011-08-23 21:47:55.000000000 -0400
28248@@ -26,6 +26,7 @@
28249 #include <linux/pci.h>
28250 #include <linux/mtd/mtd.h>
28251 #include <linux/module.h>
28252+#include <linux/slab.h>
28253
28254 #include "denali.h"
28255
28256diff -urNp linux-3.0.4/drivers/mtd/nftlcore.c linux-3.0.4/drivers/mtd/nftlcore.c
28257--- linux-3.0.4/drivers/mtd/nftlcore.c 2011-07-21 22:17:23.000000000 -0400
28258+++ linux-3.0.4/drivers/mtd/nftlcore.c 2011-08-23 21:48:14.000000000 -0400
28259@@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLre
28260 int inplace = 1;
28261 size_t retlen;
28262
28263+ pax_track_stack();
28264+
28265 memset(BlockMap, 0xff, sizeof(BlockMap));
28266 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
28267
28268diff -urNp linux-3.0.4/drivers/mtd/nftlmount.c linux-3.0.4/drivers/mtd/nftlmount.c
28269--- linux-3.0.4/drivers/mtd/nftlmount.c 2011-07-21 22:17:23.000000000 -0400
28270+++ linux-3.0.4/drivers/mtd/nftlmount.c 2011-08-23 21:48:14.000000000 -0400
28271@@ -24,6 +24,7 @@
28272 #include <asm/errno.h>
28273 #include <linux/delay.h>
28274 #include <linux/slab.h>
28275+#include <linux/sched.h>
28276 #include <linux/mtd/mtd.h>
28277 #include <linux/mtd/nand.h>
28278 #include <linux/mtd/nftl.h>
28279@@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLr
28280 struct mtd_info *mtd = nftl->mbd.mtd;
28281 unsigned int i;
28282
28283+ pax_track_stack();
28284+
28285 /* Assume logical EraseSize == physical erasesize for starting the scan.
28286 We'll sort it out later if we find a MediaHeader which says otherwise */
28287 /* Actually, we won't. The new DiskOnChip driver has already scanned
28288diff -urNp linux-3.0.4/drivers/mtd/ubi/build.c linux-3.0.4/drivers/mtd/ubi/build.c
28289--- linux-3.0.4/drivers/mtd/ubi/build.c 2011-07-21 22:17:23.000000000 -0400
28290+++ linux-3.0.4/drivers/mtd/ubi/build.c 2011-08-23 21:47:55.000000000 -0400
28291@@ -1287,7 +1287,7 @@ module_exit(ubi_exit);
28292 static int __init bytes_str_to_int(const char *str)
28293 {
28294 char *endp;
28295- unsigned long result;
28296+ unsigned long result, scale = 1;
28297
28298 result = simple_strtoul(str, &endp, 0);
28299 if (str == endp || result >= INT_MAX) {
28300@@ -1298,11 +1298,11 @@ static int __init bytes_str_to_int(const
28301
28302 switch (*endp) {
28303 case 'G':
28304- result *= 1024;
28305+ scale *= 1024;
28306 case 'M':
28307- result *= 1024;
28308+ scale *= 1024;
28309 case 'K':
28310- result *= 1024;
28311+ scale *= 1024;
28312 if (endp[1] == 'i' && endp[2] == 'B')
28313 endp += 2;
28314 case '\0':
28315@@ -1313,7 +1313,13 @@ static int __init bytes_str_to_int(const
28316 return -EINVAL;
28317 }
28318
28319- return result;
28320+ if ((intoverflow_t)result*scale >= INT_MAX) {
28321+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
28322+ str);
28323+ return -EINVAL;
28324+ }
28325+
28326+ return result*scale;
28327 }
28328
28329 /**
28330diff -urNp linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c
28331--- linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c 2011-07-21 22:17:23.000000000 -0400
28332+++ linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c 2011-08-23 21:47:55.000000000 -0400
28333@@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct b
28334 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
28335 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
28336
28337-static struct bfa_ioc_hwif nw_hwif_ct;
28338+static struct bfa_ioc_hwif nw_hwif_ct = {
28339+ .ioc_pll_init = bfa_ioc_ct_pll_init,
28340+ .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
28341+ .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
28342+ .ioc_reg_init = bfa_ioc_ct_reg_init,
28343+ .ioc_map_port = bfa_ioc_ct_map_port,
28344+ .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
28345+ .ioc_notify_fail = bfa_ioc_ct_notify_fail,
28346+ .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
28347+ .ioc_sync_start = bfa_ioc_ct_sync_start,
28348+ .ioc_sync_join = bfa_ioc_ct_sync_join,
28349+ .ioc_sync_leave = bfa_ioc_ct_sync_leave,
28350+ .ioc_sync_ack = bfa_ioc_ct_sync_ack,
28351+ .ioc_sync_complete = bfa_ioc_ct_sync_complete
28352+};
28353
28354 /**
28355 * Called from bfa_ioc_attach() to map asic specific calls.
28356@@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
28357 void
28358 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
28359 {
28360- nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
28361- nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
28362- nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
28363- nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
28364- nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
28365- nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
28366- nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
28367- nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
28368- nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
28369- nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
28370- nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
28371- nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
28372- nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
28373-
28374 ioc->ioc_hwif = &nw_hwif_ct;
28375 }
28376
28377diff -urNp linux-3.0.4/drivers/net/bna/bnad.c linux-3.0.4/drivers/net/bna/bnad.c
28378--- linux-3.0.4/drivers/net/bna/bnad.c 2011-07-21 22:17:23.000000000 -0400
28379+++ linux-3.0.4/drivers/net/bna/bnad.c 2011-08-23 21:47:55.000000000 -0400
28380@@ -1681,7 +1681,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28381 struct bna_intr_info *intr_info =
28382 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
28383 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
28384- struct bna_tx_event_cbfn tx_cbfn;
28385+ static struct bna_tx_event_cbfn tx_cbfn = {
28386+ /* Initialize the tx event handlers */
28387+ .tcb_setup_cbfn = bnad_cb_tcb_setup,
28388+ .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
28389+ .tx_stall_cbfn = bnad_cb_tx_stall,
28390+ .tx_resume_cbfn = bnad_cb_tx_resume,
28391+ .tx_cleanup_cbfn = bnad_cb_tx_cleanup
28392+ };
28393 struct bna_tx *tx;
28394 unsigned long flags;
28395
28396@@ -1690,13 +1697,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28397 tx_config->txq_depth = bnad->txq_depth;
28398 tx_config->tx_type = BNA_TX_T_REGULAR;
28399
28400- /* Initialize the tx event handlers */
28401- tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
28402- tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
28403- tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
28404- tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
28405- tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
28406-
28407 /* Get BNA's resource requirement for one tx object */
28408 spin_lock_irqsave(&bnad->bna_lock, flags);
28409 bna_tx_res_req(bnad->num_txq_per_tx,
28410@@ -1827,21 +1827,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx
28411 struct bna_intr_info *intr_info =
28412 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
28413 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
28414- struct bna_rx_event_cbfn rx_cbfn;
28415+ static struct bna_rx_event_cbfn rx_cbfn = {
28416+ /* Initialize the Rx event handlers */
28417+ .rcb_setup_cbfn = bnad_cb_rcb_setup,
28418+ .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
28419+ .ccb_setup_cbfn = bnad_cb_ccb_setup,
28420+ .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
28421+ .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
28422+ .rx_post_cbfn = bnad_cb_rx_post
28423+ };
28424 struct bna_rx *rx;
28425 unsigned long flags;
28426
28427 /* Initialize the Rx object configuration */
28428 bnad_init_rx_config(bnad, rx_config);
28429
28430- /* Initialize the Rx event handlers */
28431- rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
28432- rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
28433- rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
28434- rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
28435- rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
28436- rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
28437-
28438 /* Get BNA's resource requirement for one Rx object */
28439 spin_lock_irqsave(&bnad->bna_lock, flags);
28440 bna_rx_res_req(rx_config, res_info);
28441diff -urNp linux-3.0.4/drivers/net/bnx2.c linux-3.0.4/drivers/net/bnx2.c
28442--- linux-3.0.4/drivers/net/bnx2.c 2011-07-21 22:17:23.000000000 -0400
28443+++ linux-3.0.4/drivers/net/bnx2.c 2011-08-23 21:48:14.000000000 -0400
28444@@ -5828,6 +5828,8 @@ bnx2_test_nvram(struct bnx2 *bp)
28445 int rc = 0;
28446 u32 magic, csum;
28447
28448+ pax_track_stack();
28449+
28450 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
28451 goto test_nvram_done;
28452
28453diff -urNp linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c
28454--- linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c 2011-07-21 22:17:23.000000000 -0400
28455+++ linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c 2011-08-23 21:48:14.000000000 -0400
28456@@ -1705,6 +1705,8 @@ static int bnx2x_test_nvram(struct bnx2x
28457 int i, rc;
28458 u32 magic, crc;
28459
28460+ pax_track_stack();
28461+
28462 if (BP_NOMCP(bp))
28463 return 0;
28464
28465diff -urNp linux-3.0.4/drivers/net/cxgb3/l2t.h linux-3.0.4/drivers/net/cxgb3/l2t.h
28466--- linux-3.0.4/drivers/net/cxgb3/l2t.h 2011-07-21 22:17:23.000000000 -0400
28467+++ linux-3.0.4/drivers/net/cxgb3/l2t.h 2011-08-23 21:47:55.000000000 -0400
28468@@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)
28469 */
28470 struct l2t_skb_cb {
28471 arp_failure_handler_func arp_failure_handler;
28472-};
28473+} __no_const;
28474
28475 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
28476
28477diff -urNp linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c
28478--- linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c 2011-07-21 22:17:23.000000000 -0400
28479+++ linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c 2011-08-23 21:48:14.000000000 -0400
28480@@ -3396,6 +3396,8 @@ static int __devinit enable_msix(struct
28481 unsigned int nchan = adap->params.nports;
28482 struct msix_entry entries[MAX_INGQ + 1];
28483
28484+ pax_track_stack();
28485+
28486 for (i = 0; i < ARRAY_SIZE(entries); ++i)
28487 entries[i].entry = i;
28488
28489diff -urNp linux-3.0.4/drivers/net/cxgb4/t4_hw.c linux-3.0.4/drivers/net/cxgb4/t4_hw.c
28490--- linux-3.0.4/drivers/net/cxgb4/t4_hw.c 2011-07-21 22:17:23.000000000 -0400
28491+++ linux-3.0.4/drivers/net/cxgb4/t4_hw.c 2011-08-23 21:48:14.000000000 -0400
28492@@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter
28493 u8 vpd[VPD_LEN], csum;
28494 unsigned int vpdr_len, kw_offset, id_len;
28495
28496+ pax_track_stack();
28497+
28498 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
28499 if (ret < 0)
28500 return ret;
28501diff -urNp linux-3.0.4/drivers/net/e1000e/82571.c linux-3.0.4/drivers/net/e1000e/82571.c
28502--- linux-3.0.4/drivers/net/e1000e/82571.c 2011-07-21 22:17:23.000000000 -0400
28503+++ linux-3.0.4/drivers/net/e1000e/82571.c 2011-08-23 21:47:55.000000000 -0400
28504@@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(s
28505 {
28506 struct e1000_hw *hw = &adapter->hw;
28507 struct e1000_mac_info *mac = &hw->mac;
28508- struct e1000_mac_operations *func = &mac->ops;
28509+ e1000_mac_operations_no_const *func = &mac->ops;
28510 u32 swsm = 0;
28511 u32 swsm2 = 0;
28512 bool force_clear_smbi = false;
28513diff -urNp linux-3.0.4/drivers/net/e1000e/es2lan.c linux-3.0.4/drivers/net/e1000e/es2lan.c
28514--- linux-3.0.4/drivers/net/e1000e/es2lan.c 2011-07-21 22:17:23.000000000 -0400
28515+++ linux-3.0.4/drivers/net/e1000e/es2lan.c 2011-08-23 21:47:55.000000000 -0400
28516@@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es
28517 {
28518 struct e1000_hw *hw = &adapter->hw;
28519 struct e1000_mac_info *mac = &hw->mac;
28520- struct e1000_mac_operations *func = &mac->ops;
28521+ e1000_mac_operations_no_const *func = &mac->ops;
28522
28523 /* Set media type */
28524 switch (adapter->pdev->device) {
28525diff -urNp linux-3.0.4/drivers/net/e1000e/hw.h linux-3.0.4/drivers/net/e1000e/hw.h
28526--- linux-3.0.4/drivers/net/e1000e/hw.h 2011-07-21 22:17:23.000000000 -0400
28527+++ linux-3.0.4/drivers/net/e1000e/hw.h 2011-08-23 21:47:55.000000000 -0400
28528@@ -776,6 +776,7 @@ struct e1000_mac_operations {
28529 void (*write_vfta)(struct e1000_hw *, u32, u32);
28530 s32 (*read_mac_addr)(struct e1000_hw *);
28531 };
28532+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28533
28534 /* Function pointers for the PHY. */
28535 struct e1000_phy_operations {
28536@@ -799,6 +800,7 @@ struct e1000_phy_operations {
28537 void (*power_up)(struct e1000_hw *);
28538 void (*power_down)(struct e1000_hw *);
28539 };
28540+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
28541
28542 /* Function pointers for the NVM. */
28543 struct e1000_nvm_operations {
28544@@ -810,9 +812,10 @@ struct e1000_nvm_operations {
28545 s32 (*validate)(struct e1000_hw *);
28546 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
28547 };
28548+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
28549
28550 struct e1000_mac_info {
28551- struct e1000_mac_operations ops;
28552+ e1000_mac_operations_no_const ops;
28553 u8 addr[ETH_ALEN];
28554 u8 perm_addr[ETH_ALEN];
28555
28556@@ -853,7 +856,7 @@ struct e1000_mac_info {
28557 };
28558
28559 struct e1000_phy_info {
28560- struct e1000_phy_operations ops;
28561+ e1000_phy_operations_no_const ops;
28562
28563 enum e1000_phy_type type;
28564
28565@@ -887,7 +890,7 @@ struct e1000_phy_info {
28566 };
28567
28568 struct e1000_nvm_info {
28569- struct e1000_nvm_operations ops;
28570+ e1000_nvm_operations_no_const ops;
28571
28572 enum e1000_nvm_type type;
28573 enum e1000_nvm_override override;
28574diff -urNp linux-3.0.4/drivers/net/hamradio/6pack.c linux-3.0.4/drivers/net/hamradio/6pack.c
28575--- linux-3.0.4/drivers/net/hamradio/6pack.c 2011-07-21 22:17:23.000000000 -0400
28576+++ linux-3.0.4/drivers/net/hamradio/6pack.c 2011-08-23 21:48:14.000000000 -0400
28577@@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct t
28578 unsigned char buf[512];
28579 int count1;
28580
28581+ pax_track_stack();
28582+
28583 if (!count)
28584 return;
28585
28586diff -urNp linux-3.0.4/drivers/net/igb/e1000_hw.h linux-3.0.4/drivers/net/igb/e1000_hw.h
28587--- linux-3.0.4/drivers/net/igb/e1000_hw.h 2011-07-21 22:17:23.000000000 -0400
28588+++ linux-3.0.4/drivers/net/igb/e1000_hw.h 2011-08-23 21:47:55.000000000 -0400
28589@@ -314,6 +314,7 @@ struct e1000_mac_operations {
28590 s32 (*read_mac_addr)(struct e1000_hw *);
28591 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
28592 };
28593+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28594
28595 struct e1000_phy_operations {
28596 s32 (*acquire)(struct e1000_hw *);
28597@@ -330,6 +331,7 @@ struct e1000_phy_operations {
28598 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
28599 s32 (*write_reg)(struct e1000_hw *, u32, u16);
28600 };
28601+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
28602
28603 struct e1000_nvm_operations {
28604 s32 (*acquire)(struct e1000_hw *);
28605@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
28606 s32 (*update)(struct e1000_hw *);
28607 s32 (*validate)(struct e1000_hw *);
28608 };
28609+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
28610
28611 struct e1000_info {
28612 s32 (*get_invariants)(struct e1000_hw *);
28613@@ -350,7 +353,7 @@ struct e1000_info {
28614 extern const struct e1000_info e1000_82575_info;
28615
28616 struct e1000_mac_info {
28617- struct e1000_mac_operations ops;
28618+ e1000_mac_operations_no_const ops;
28619
28620 u8 addr[6];
28621 u8 perm_addr[6];
28622@@ -388,7 +391,7 @@ struct e1000_mac_info {
28623 };
28624
28625 struct e1000_phy_info {
28626- struct e1000_phy_operations ops;
28627+ e1000_phy_operations_no_const ops;
28628
28629 enum e1000_phy_type type;
28630
28631@@ -423,7 +426,7 @@ struct e1000_phy_info {
28632 };
28633
28634 struct e1000_nvm_info {
28635- struct e1000_nvm_operations ops;
28636+ e1000_nvm_operations_no_const ops;
28637 enum e1000_nvm_type type;
28638 enum e1000_nvm_override override;
28639
28640@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
28641 s32 (*check_for_ack)(struct e1000_hw *, u16);
28642 s32 (*check_for_rst)(struct e1000_hw *, u16);
28643 };
28644+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
28645
28646 struct e1000_mbx_stats {
28647 u32 msgs_tx;
28648@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
28649 };
28650
28651 struct e1000_mbx_info {
28652- struct e1000_mbx_operations ops;
28653+ e1000_mbx_operations_no_const ops;
28654 struct e1000_mbx_stats stats;
28655 u32 timeout;
28656 u32 usec_delay;
28657diff -urNp linux-3.0.4/drivers/net/igbvf/vf.h linux-3.0.4/drivers/net/igbvf/vf.h
28658--- linux-3.0.4/drivers/net/igbvf/vf.h 2011-07-21 22:17:23.000000000 -0400
28659+++ linux-3.0.4/drivers/net/igbvf/vf.h 2011-08-23 21:47:55.000000000 -0400
28660@@ -189,9 +189,10 @@ struct e1000_mac_operations {
28661 s32 (*read_mac_addr)(struct e1000_hw *);
28662 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
28663 };
28664+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28665
28666 struct e1000_mac_info {
28667- struct e1000_mac_operations ops;
28668+ e1000_mac_operations_no_const ops;
28669 u8 addr[6];
28670 u8 perm_addr[6];
28671
28672@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
28673 s32 (*check_for_ack)(struct e1000_hw *);
28674 s32 (*check_for_rst)(struct e1000_hw *);
28675 };
28676+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
28677
28678 struct e1000_mbx_stats {
28679 u32 msgs_tx;
28680@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
28681 };
28682
28683 struct e1000_mbx_info {
28684- struct e1000_mbx_operations ops;
28685+ e1000_mbx_operations_no_const ops;
28686 struct e1000_mbx_stats stats;
28687 u32 timeout;
28688 u32 usec_delay;
28689diff -urNp linux-3.0.4/drivers/net/ixgb/ixgb_main.c linux-3.0.4/drivers/net/ixgb/ixgb_main.c
28690--- linux-3.0.4/drivers/net/ixgb/ixgb_main.c 2011-07-21 22:17:23.000000000 -0400
28691+++ linux-3.0.4/drivers/net/ixgb/ixgb_main.c 2011-08-23 21:48:14.000000000 -0400
28692@@ -1070,6 +1070,8 @@ ixgb_set_multi(struct net_device *netdev
28693 u32 rctl;
28694 int i;
28695
28696+ pax_track_stack();
28697+
28698 /* Check for Promiscuous and All Multicast modes */
28699
28700 rctl = IXGB_READ_REG(hw, RCTL);
28701diff -urNp linux-3.0.4/drivers/net/ixgb/ixgb_param.c linux-3.0.4/drivers/net/ixgb/ixgb_param.c
28702--- linux-3.0.4/drivers/net/ixgb/ixgb_param.c 2011-07-21 22:17:23.000000000 -0400
28703+++ linux-3.0.4/drivers/net/ixgb/ixgb_param.c 2011-08-23 21:48:14.000000000 -0400
28704@@ -261,6 +261,9 @@ void __devinit
28705 ixgb_check_options(struct ixgb_adapter *adapter)
28706 {
28707 int bd = adapter->bd_number;
28708+
28709+ pax_track_stack();
28710+
28711 if (bd >= IXGB_MAX_NIC) {
28712 pr_notice("Warning: no configuration for board #%i\n", bd);
28713 pr_notice("Using defaults for all values\n");
28714diff -urNp linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h
28715--- linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h 2011-07-21 22:17:23.000000000 -0400
28716+++ linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h 2011-08-23 21:47:55.000000000 -0400
28717@@ -2584,6 +2584,7 @@ struct ixgbe_eeprom_operations {
28718 s32 (*update_checksum)(struct ixgbe_hw *);
28719 u16 (*calc_checksum)(struct ixgbe_hw *);
28720 };
28721+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
28722
28723 struct ixgbe_mac_operations {
28724 s32 (*init_hw)(struct ixgbe_hw *);
28725@@ -2639,6 +2640,7 @@ struct ixgbe_mac_operations {
28726 /* Flow Control */
28727 s32 (*fc_enable)(struct ixgbe_hw *, s32);
28728 };
28729+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
28730
28731 struct ixgbe_phy_operations {
28732 s32 (*identify)(struct ixgbe_hw *);
28733@@ -2658,9 +2660,10 @@ struct ixgbe_phy_operations {
28734 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
28735 s32 (*check_overtemp)(struct ixgbe_hw *);
28736 };
28737+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
28738
28739 struct ixgbe_eeprom_info {
28740- struct ixgbe_eeprom_operations ops;
28741+ ixgbe_eeprom_operations_no_const ops;
28742 enum ixgbe_eeprom_type type;
28743 u32 semaphore_delay;
28744 u16 word_size;
28745@@ -2670,7 +2673,7 @@ struct ixgbe_eeprom_info {
28746
28747 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
28748 struct ixgbe_mac_info {
28749- struct ixgbe_mac_operations ops;
28750+ ixgbe_mac_operations_no_const ops;
28751 enum ixgbe_mac_type type;
28752 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
28753 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
28754@@ -2698,7 +2701,7 @@ struct ixgbe_mac_info {
28755 };
28756
28757 struct ixgbe_phy_info {
28758- struct ixgbe_phy_operations ops;
28759+ ixgbe_phy_operations_no_const ops;
28760 struct mdio_if_info mdio;
28761 enum ixgbe_phy_type type;
28762 u32 id;
28763@@ -2726,6 +2729,7 @@ struct ixgbe_mbx_operations {
28764 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
28765 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
28766 };
28767+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
28768
28769 struct ixgbe_mbx_stats {
28770 u32 msgs_tx;
28771@@ -2737,7 +2741,7 @@ struct ixgbe_mbx_stats {
28772 };
28773
28774 struct ixgbe_mbx_info {
28775- struct ixgbe_mbx_operations ops;
28776+ ixgbe_mbx_operations_no_const ops;
28777 struct ixgbe_mbx_stats stats;
28778 u32 timeout;
28779 u32 usec_delay;
28780diff -urNp linux-3.0.4/drivers/net/ixgbevf/vf.h linux-3.0.4/drivers/net/ixgbevf/vf.h
28781--- linux-3.0.4/drivers/net/ixgbevf/vf.h 2011-07-21 22:17:23.000000000 -0400
28782+++ linux-3.0.4/drivers/net/ixgbevf/vf.h 2011-08-23 21:47:55.000000000 -0400
28783@@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
28784 s32 (*clear_vfta)(struct ixgbe_hw *);
28785 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
28786 };
28787+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
28788
28789 enum ixgbe_mac_type {
28790 ixgbe_mac_unknown = 0,
28791@@ -79,7 +80,7 @@ enum ixgbe_mac_type {
28792 };
28793
28794 struct ixgbe_mac_info {
28795- struct ixgbe_mac_operations ops;
28796+ ixgbe_mac_operations_no_const ops;
28797 u8 addr[6];
28798 u8 perm_addr[6];
28799
28800@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
28801 s32 (*check_for_ack)(struct ixgbe_hw *);
28802 s32 (*check_for_rst)(struct ixgbe_hw *);
28803 };
28804+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
28805
28806 struct ixgbe_mbx_stats {
28807 u32 msgs_tx;
28808@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
28809 };
28810
28811 struct ixgbe_mbx_info {
28812- struct ixgbe_mbx_operations ops;
28813+ ixgbe_mbx_operations_no_const ops;
28814 struct ixgbe_mbx_stats stats;
28815 u32 timeout;
28816 u32 udelay;
28817diff -urNp linux-3.0.4/drivers/net/ksz884x.c linux-3.0.4/drivers/net/ksz884x.c
28818--- linux-3.0.4/drivers/net/ksz884x.c 2011-07-21 22:17:23.000000000 -0400
28819+++ linux-3.0.4/drivers/net/ksz884x.c 2011-08-23 21:48:14.000000000 -0400
28820@@ -6534,6 +6534,8 @@ static void netdev_get_ethtool_stats(str
28821 int rc;
28822 u64 counter[TOTAL_PORT_COUNTER_NUM];
28823
28824+ pax_track_stack();
28825+
28826 mutex_lock(&hw_priv->lock);
28827 n = SWITCH_PORT_NUM;
28828 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
28829diff -urNp linux-3.0.4/drivers/net/mlx4/main.c linux-3.0.4/drivers/net/mlx4/main.c
28830--- linux-3.0.4/drivers/net/mlx4/main.c 2011-07-21 22:17:23.000000000 -0400
28831+++ linux-3.0.4/drivers/net/mlx4/main.c 2011-08-23 21:48:14.000000000 -0400
28832@@ -40,6 +40,7 @@
28833 #include <linux/dma-mapping.h>
28834 #include <linux/slab.h>
28835 #include <linux/io-mapping.h>
28836+#include <linux/sched.h>
28837
28838 #include <linux/mlx4/device.h>
28839 #include <linux/mlx4/doorbell.h>
28840@@ -764,6 +765,8 @@ static int mlx4_init_hca(struct mlx4_dev
28841 u64 icm_size;
28842 int err;
28843
28844+ pax_track_stack();
28845+
28846 err = mlx4_QUERY_FW(dev);
28847 if (err) {
28848 if (err == -EACCES)
28849diff -urNp linux-3.0.4/drivers/net/niu.c linux-3.0.4/drivers/net/niu.c
28850--- linux-3.0.4/drivers/net/niu.c 2011-08-23 21:44:40.000000000 -0400
28851+++ linux-3.0.4/drivers/net/niu.c 2011-08-23 21:48:14.000000000 -0400
28852@@ -9056,6 +9056,8 @@ static void __devinit niu_try_msix(struc
28853 int i, num_irqs, err;
28854 u8 first_ldg;
28855
28856+ pax_track_stack();
28857+
28858 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
28859 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
28860 ldg_num_map[i] = first_ldg + i;
28861diff -urNp linux-3.0.4/drivers/net/pcnet32.c linux-3.0.4/drivers/net/pcnet32.c
28862--- linux-3.0.4/drivers/net/pcnet32.c 2011-07-21 22:17:23.000000000 -0400
28863+++ linux-3.0.4/drivers/net/pcnet32.c 2011-08-23 21:47:55.000000000 -0400
28864@@ -82,7 +82,7 @@ static int cards_found;
28865 /*
28866 * VLB I/O addresses
28867 */
28868-static unsigned int pcnet32_portlist[] __initdata =
28869+static unsigned int pcnet32_portlist[] __devinitdata =
28870 { 0x300, 0x320, 0x340, 0x360, 0 };
28871
28872 static int pcnet32_debug;
28873@@ -270,7 +270,7 @@ struct pcnet32_private {
28874 struct sk_buff **rx_skbuff;
28875 dma_addr_t *tx_dma_addr;
28876 dma_addr_t *rx_dma_addr;
28877- struct pcnet32_access a;
28878+ struct pcnet32_access *a;
28879 spinlock_t lock; /* Guard lock */
28880 unsigned int cur_rx, cur_tx; /* The next free ring entry */
28881 unsigned int rx_ring_size; /* current rx ring size */
28882@@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct n
28883 u16 val;
28884
28885 netif_wake_queue(dev);
28886- val = lp->a.read_csr(ioaddr, CSR3);
28887+ val = lp->a->read_csr(ioaddr, CSR3);
28888 val &= 0x00ff;
28889- lp->a.write_csr(ioaddr, CSR3, val);
28890+ lp->a->write_csr(ioaddr, CSR3, val);
28891 napi_enable(&lp->napi);
28892 }
28893
28894@@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_d
28895 r = mii_link_ok(&lp->mii_if);
28896 } else if (lp->chip_version >= PCNET32_79C970A) {
28897 ulong ioaddr = dev->base_addr; /* card base I/O address */
28898- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
28899+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
28900 } else { /* can not detect link on really old chips */
28901 r = 1;
28902 }
28903@@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct
28904 pcnet32_netif_stop(dev);
28905
28906 spin_lock_irqsave(&lp->lock, flags);
28907- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28908+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28909
28910 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
28911
28912@@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct
28913 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
28914 {
28915 struct pcnet32_private *lp = netdev_priv(dev);
28916- struct pcnet32_access *a = &lp->a; /* access to registers */
28917+ struct pcnet32_access *a = lp->a; /* access to registers */
28918 ulong ioaddr = dev->base_addr; /* card base I/O address */
28919 struct sk_buff *skb; /* sk buff */
28920 int x, i; /* counters */
28921@@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct
28922 pcnet32_netif_stop(dev);
28923
28924 spin_lock_irqsave(&lp->lock, flags);
28925- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28926+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28927
28928 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
28929
28930 /* Reset the PCNET32 */
28931- lp->a.reset(ioaddr);
28932- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
28933+ lp->a->reset(ioaddr);
28934+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
28935
28936 /* switch pcnet32 to 32bit mode */
28937- lp->a.write_bcr(ioaddr, 20, 2);
28938+ lp->a->write_bcr(ioaddr, 20, 2);
28939
28940 /* purge & init rings but don't actually restart */
28941 pcnet32_restart(dev, 0x0000);
28942
28943- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28944+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28945
28946 /* Initialize Transmit buffers. */
28947 size = data_len + 15;
28948@@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct
28949
28950 /* set int loopback in CSR15 */
28951 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
28952- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
28953+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
28954
28955 teststatus = cpu_to_le16(0x8000);
28956- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
28957+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
28958
28959 /* Check status of descriptors */
28960 for (x = 0; x < numbuffs; x++) {
28961@@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct
28962 }
28963 }
28964
28965- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28966+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28967 wmb();
28968 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
28969 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
28970@@ -1015,7 +1015,7 @@ clean_up:
28971 pcnet32_restart(dev, CSR0_NORMAL);
28972 } else {
28973 pcnet32_purge_rx_ring(dev);
28974- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
28975+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
28976 }
28977 spin_unlock_irqrestore(&lp->lock, flags);
28978
28979@@ -1026,7 +1026,7 @@ static int pcnet32_set_phys_id(struct ne
28980 enum ethtool_phys_id_state state)
28981 {
28982 struct pcnet32_private *lp = netdev_priv(dev);
28983- struct pcnet32_access *a = &lp->a;
28984+ struct pcnet32_access *a = lp->a;
28985 ulong ioaddr = dev->base_addr;
28986 unsigned long flags;
28987 int i;
28988@@ -1067,7 +1067,7 @@ static int pcnet32_suspend(struct net_de
28989 {
28990 int csr5;
28991 struct pcnet32_private *lp = netdev_priv(dev);
28992- struct pcnet32_access *a = &lp->a;
28993+ struct pcnet32_access *a = lp->a;
28994 ulong ioaddr = dev->base_addr;
28995 int ticks;
28996
28997@@ -1324,8 +1324,8 @@ static int pcnet32_poll(struct napi_stru
28998 spin_lock_irqsave(&lp->lock, flags);
28999 if (pcnet32_tx(dev)) {
29000 /* reset the chip to clear the error condition, then restart */
29001- lp->a.reset(ioaddr);
29002- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29003+ lp->a->reset(ioaddr);
29004+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29005 pcnet32_restart(dev, CSR0_START);
29006 netif_wake_queue(dev);
29007 }
29008@@ -1337,12 +1337,12 @@ static int pcnet32_poll(struct napi_stru
29009 __napi_complete(napi);
29010
29011 /* clear interrupt masks */
29012- val = lp->a.read_csr(ioaddr, CSR3);
29013+ val = lp->a->read_csr(ioaddr, CSR3);
29014 val &= 0x00ff;
29015- lp->a.write_csr(ioaddr, CSR3, val);
29016+ lp->a->write_csr(ioaddr, CSR3, val);
29017
29018 /* Set interrupt enable. */
29019- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
29020+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
29021
29022 spin_unlock_irqrestore(&lp->lock, flags);
29023 }
29024@@ -1365,7 +1365,7 @@ static void pcnet32_get_regs(struct net_
29025 int i, csr0;
29026 u16 *buff = ptr;
29027 struct pcnet32_private *lp = netdev_priv(dev);
29028- struct pcnet32_access *a = &lp->a;
29029+ struct pcnet32_access *a = lp->a;
29030 ulong ioaddr = dev->base_addr;
29031 unsigned long flags;
29032
29033@@ -1401,9 +1401,9 @@ static void pcnet32_get_regs(struct net_
29034 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
29035 if (lp->phymask & (1 << j)) {
29036 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
29037- lp->a.write_bcr(ioaddr, 33,
29038+ lp->a->write_bcr(ioaddr, 33,
29039 (j << 5) | i);
29040- *buff++ = lp->a.read_bcr(ioaddr, 34);
29041+ *buff++ = lp->a->read_bcr(ioaddr, 34);
29042 }
29043 }
29044 }
29045@@ -1785,7 +1785,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29046 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
29047 lp->options |= PCNET32_PORT_FD;
29048
29049- lp->a = *a;
29050+ lp->a = a;
29051
29052 /* prior to register_netdev, dev->name is not yet correct */
29053 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
29054@@ -1844,7 +1844,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29055 if (lp->mii) {
29056 /* lp->phycount and lp->phymask are set to 0 by memset above */
29057
29058- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29059+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29060 /* scan for PHYs */
29061 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29062 unsigned short id1, id2;
29063@@ -1864,7 +1864,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29064 pr_info("Found PHY %04x:%04x at address %d\n",
29065 id1, id2, i);
29066 }
29067- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29068+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29069 if (lp->phycount > 1)
29070 lp->options |= PCNET32_PORT_MII;
29071 }
29072@@ -2020,10 +2020,10 @@ static int pcnet32_open(struct net_devic
29073 }
29074
29075 /* Reset the PCNET32 */
29076- lp->a.reset(ioaddr);
29077+ lp->a->reset(ioaddr);
29078
29079 /* switch pcnet32 to 32bit mode */
29080- lp->a.write_bcr(ioaddr, 20, 2);
29081+ lp->a->write_bcr(ioaddr, 20, 2);
29082
29083 netif_printk(lp, ifup, KERN_DEBUG, dev,
29084 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
29085@@ -2032,14 +2032,14 @@ static int pcnet32_open(struct net_devic
29086 (u32) (lp->init_dma_addr));
29087
29088 /* set/reset autoselect bit */
29089- val = lp->a.read_bcr(ioaddr, 2) & ~2;
29090+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
29091 if (lp->options & PCNET32_PORT_ASEL)
29092 val |= 2;
29093- lp->a.write_bcr(ioaddr, 2, val);
29094+ lp->a->write_bcr(ioaddr, 2, val);
29095
29096 /* handle full duplex setting */
29097 if (lp->mii_if.full_duplex) {
29098- val = lp->a.read_bcr(ioaddr, 9) & ~3;
29099+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
29100 if (lp->options & PCNET32_PORT_FD) {
29101 val |= 1;
29102 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
29103@@ -2049,14 +2049,14 @@ static int pcnet32_open(struct net_devic
29104 if (lp->chip_version == 0x2627)
29105 val |= 3;
29106 }
29107- lp->a.write_bcr(ioaddr, 9, val);
29108+ lp->a->write_bcr(ioaddr, 9, val);
29109 }
29110
29111 /* set/reset GPSI bit in test register */
29112- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
29113+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
29114 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
29115 val |= 0x10;
29116- lp->a.write_csr(ioaddr, 124, val);
29117+ lp->a->write_csr(ioaddr, 124, val);
29118
29119 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
29120 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
29121@@ -2075,24 +2075,24 @@ static int pcnet32_open(struct net_devic
29122 * duplex, and/or enable auto negotiation, and clear DANAS
29123 */
29124 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
29125- lp->a.write_bcr(ioaddr, 32,
29126- lp->a.read_bcr(ioaddr, 32) | 0x0080);
29127+ lp->a->write_bcr(ioaddr, 32,
29128+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
29129 /* disable Auto Negotiation, set 10Mpbs, HD */
29130- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
29131+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
29132 if (lp->options & PCNET32_PORT_FD)
29133 val |= 0x10;
29134 if (lp->options & PCNET32_PORT_100)
29135 val |= 0x08;
29136- lp->a.write_bcr(ioaddr, 32, val);
29137+ lp->a->write_bcr(ioaddr, 32, val);
29138 } else {
29139 if (lp->options & PCNET32_PORT_ASEL) {
29140- lp->a.write_bcr(ioaddr, 32,
29141- lp->a.read_bcr(ioaddr,
29142+ lp->a->write_bcr(ioaddr, 32,
29143+ lp->a->read_bcr(ioaddr,
29144 32) | 0x0080);
29145 /* enable auto negotiate, setup, disable fd */
29146- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
29147+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
29148 val |= 0x20;
29149- lp->a.write_bcr(ioaddr, 32, val);
29150+ lp->a->write_bcr(ioaddr, 32, val);
29151 }
29152 }
29153 } else {
29154@@ -2105,10 +2105,10 @@ static int pcnet32_open(struct net_devic
29155 * There is really no good other way to handle multiple PHYs
29156 * other than turning off all automatics
29157 */
29158- val = lp->a.read_bcr(ioaddr, 2);
29159- lp->a.write_bcr(ioaddr, 2, val & ~2);
29160- val = lp->a.read_bcr(ioaddr, 32);
29161- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
29162+ val = lp->a->read_bcr(ioaddr, 2);
29163+ lp->a->write_bcr(ioaddr, 2, val & ~2);
29164+ val = lp->a->read_bcr(ioaddr, 32);
29165+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
29166
29167 if (!(lp->options & PCNET32_PORT_ASEL)) {
29168 /* setup ecmd */
29169@@ -2118,7 +2118,7 @@ static int pcnet32_open(struct net_devic
29170 ethtool_cmd_speed_set(&ecmd,
29171 (lp->options & PCNET32_PORT_100) ?
29172 SPEED_100 : SPEED_10);
29173- bcr9 = lp->a.read_bcr(ioaddr, 9);
29174+ bcr9 = lp->a->read_bcr(ioaddr, 9);
29175
29176 if (lp->options & PCNET32_PORT_FD) {
29177 ecmd.duplex = DUPLEX_FULL;
29178@@ -2127,7 +2127,7 @@ static int pcnet32_open(struct net_devic
29179 ecmd.duplex = DUPLEX_HALF;
29180 bcr9 |= ~(1 << 0);
29181 }
29182- lp->a.write_bcr(ioaddr, 9, bcr9);
29183+ lp->a->write_bcr(ioaddr, 9, bcr9);
29184 }
29185
29186 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29187@@ -2158,9 +2158,9 @@ static int pcnet32_open(struct net_devic
29188
29189 #ifdef DO_DXSUFLO
29190 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
29191- val = lp->a.read_csr(ioaddr, CSR3);
29192+ val = lp->a->read_csr(ioaddr, CSR3);
29193 val |= 0x40;
29194- lp->a.write_csr(ioaddr, CSR3, val);
29195+ lp->a->write_csr(ioaddr, CSR3, val);
29196 }
29197 #endif
29198
29199@@ -2176,11 +2176,11 @@ static int pcnet32_open(struct net_devic
29200 napi_enable(&lp->napi);
29201
29202 /* Re-initialize the PCNET32, and start it when done. */
29203- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29204- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29205+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29206+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29207
29208- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29209- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29210+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29211+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29212
29213 netif_start_queue(dev);
29214
29215@@ -2192,19 +2192,19 @@ static int pcnet32_open(struct net_devic
29216
29217 i = 0;
29218 while (i++ < 100)
29219- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29220+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29221 break;
29222 /*
29223 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
29224 * reports that doing so triggers a bug in the '974.
29225 */
29226- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
29227+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
29228
29229 netif_printk(lp, ifup, KERN_DEBUG, dev,
29230 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
29231 i,
29232 (u32) (lp->init_dma_addr),
29233- lp->a.read_csr(ioaddr, CSR0));
29234+ lp->a->read_csr(ioaddr, CSR0));
29235
29236 spin_unlock_irqrestore(&lp->lock, flags);
29237
29238@@ -2218,7 +2218,7 @@ err_free_ring:
29239 * Switch back to 16bit mode to avoid problems with dumb
29240 * DOS packet driver after a warm reboot
29241 */
29242- lp->a.write_bcr(ioaddr, 20, 4);
29243+ lp->a->write_bcr(ioaddr, 20, 4);
29244
29245 err_free_irq:
29246 spin_unlock_irqrestore(&lp->lock, flags);
29247@@ -2323,7 +2323,7 @@ static void pcnet32_restart(struct net_d
29248
29249 /* wait for stop */
29250 for (i = 0; i < 100; i++)
29251- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
29252+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
29253 break;
29254
29255 if (i >= 100)
29256@@ -2335,13 +2335,13 @@ static void pcnet32_restart(struct net_d
29257 return;
29258
29259 /* ReInit Ring */
29260- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29261+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29262 i = 0;
29263 while (i++ < 1000)
29264- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29265+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29266 break;
29267
29268- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
29269+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
29270 }
29271
29272 static void pcnet32_tx_timeout(struct net_device *dev)
29273@@ -2353,8 +2353,8 @@ static void pcnet32_tx_timeout(struct ne
29274 /* Transmitter timeout, serious problems. */
29275 if (pcnet32_debug & NETIF_MSG_DRV)
29276 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
29277- dev->name, lp->a.read_csr(ioaddr, CSR0));
29278- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29279+ dev->name, lp->a->read_csr(ioaddr, CSR0));
29280+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29281 dev->stats.tx_errors++;
29282 if (netif_msg_tx_err(lp)) {
29283 int i;
29284@@ -2397,7 +2397,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29285
29286 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
29287 "%s() called, csr0 %4.4x\n",
29288- __func__, lp->a.read_csr(ioaddr, CSR0));
29289+ __func__, lp->a->read_csr(ioaddr, CSR0));
29290
29291 /* Default status -- will not enable Successful-TxDone
29292 * interrupt when that option is available to us.
29293@@ -2427,7 +2427,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29294 dev->stats.tx_bytes += skb->len;
29295
29296 /* Trigger an immediate send poll. */
29297- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29298+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29299
29300 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
29301 lp->tx_full = 1;
29302@@ -2452,16 +2452,16 @@ pcnet32_interrupt(int irq, void *dev_id)
29303
29304 spin_lock(&lp->lock);
29305
29306- csr0 = lp->a.read_csr(ioaddr, CSR0);
29307+ csr0 = lp->a->read_csr(ioaddr, CSR0);
29308 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
29309 if (csr0 == 0xffff)
29310 break; /* PCMCIA remove happened */
29311 /* Acknowledge all of the current interrupt sources ASAP. */
29312- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29313+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29314
29315 netif_printk(lp, intr, KERN_DEBUG, dev,
29316 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
29317- csr0, lp->a.read_csr(ioaddr, CSR0));
29318+ csr0, lp->a->read_csr(ioaddr, CSR0));
29319
29320 /* Log misc errors. */
29321 if (csr0 & 0x4000)
29322@@ -2488,19 +2488,19 @@ pcnet32_interrupt(int irq, void *dev_id)
29323 if (napi_schedule_prep(&lp->napi)) {
29324 u16 val;
29325 /* set interrupt masks */
29326- val = lp->a.read_csr(ioaddr, CSR3);
29327+ val = lp->a->read_csr(ioaddr, CSR3);
29328 val |= 0x5f00;
29329- lp->a.write_csr(ioaddr, CSR3, val);
29330+ lp->a->write_csr(ioaddr, CSR3, val);
29331
29332 __napi_schedule(&lp->napi);
29333 break;
29334 }
29335- csr0 = lp->a.read_csr(ioaddr, CSR0);
29336+ csr0 = lp->a->read_csr(ioaddr, CSR0);
29337 }
29338
29339 netif_printk(lp, intr, KERN_DEBUG, dev,
29340 "exiting interrupt, csr0=%#4.4x\n",
29341- lp->a.read_csr(ioaddr, CSR0));
29342+ lp->a->read_csr(ioaddr, CSR0));
29343
29344 spin_unlock(&lp->lock);
29345
29346@@ -2520,20 +2520,20 @@ static int pcnet32_close(struct net_devi
29347
29348 spin_lock_irqsave(&lp->lock, flags);
29349
29350- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29351+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29352
29353 netif_printk(lp, ifdown, KERN_DEBUG, dev,
29354 "Shutting down ethercard, status was %2.2x\n",
29355- lp->a.read_csr(ioaddr, CSR0));
29356+ lp->a->read_csr(ioaddr, CSR0));
29357
29358 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
29359- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29360+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29361
29362 /*
29363 * Switch back to 16bit mode to avoid problems with dumb
29364 * DOS packet driver after a warm reboot
29365 */
29366- lp->a.write_bcr(ioaddr, 20, 4);
29367+ lp->a->write_bcr(ioaddr, 20, 4);
29368
29369 spin_unlock_irqrestore(&lp->lock, flags);
29370
29371@@ -2556,7 +2556,7 @@ static struct net_device_stats *pcnet32_
29372 unsigned long flags;
29373
29374 spin_lock_irqsave(&lp->lock, flags);
29375- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29376+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29377 spin_unlock_irqrestore(&lp->lock, flags);
29378
29379 return &dev->stats;
29380@@ -2578,10 +2578,10 @@ static void pcnet32_load_multicast(struc
29381 if (dev->flags & IFF_ALLMULTI) {
29382 ib->filter[0] = cpu_to_le32(~0U);
29383 ib->filter[1] = cpu_to_le32(~0U);
29384- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29385- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29386- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29387- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29388+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29389+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29390+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29391+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29392 return;
29393 }
29394 /* clear the multicast filter */
29395@@ -2601,7 +2601,7 @@ static void pcnet32_load_multicast(struc
29396 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
29397 }
29398 for (i = 0; i < 4; i++)
29399- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
29400+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
29401 le16_to_cpu(mcast_table[i]));
29402 }
29403
29404@@ -2616,28 +2616,28 @@ static void pcnet32_set_multicast_list(s
29405
29406 spin_lock_irqsave(&lp->lock, flags);
29407 suspended = pcnet32_suspend(dev, &flags, 0);
29408- csr15 = lp->a.read_csr(ioaddr, CSR15);
29409+ csr15 = lp->a->read_csr(ioaddr, CSR15);
29410 if (dev->flags & IFF_PROMISC) {
29411 /* Log any net taps. */
29412 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
29413 lp->init_block->mode =
29414 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
29415 7);
29416- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
29417+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
29418 } else {
29419 lp->init_block->mode =
29420 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
29421- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29422+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29423 pcnet32_load_multicast(dev);
29424 }
29425
29426 if (suspended) {
29427 int csr5;
29428 /* clear SUSPEND (SPND) - CSR5 bit 0 */
29429- csr5 = lp->a.read_csr(ioaddr, CSR5);
29430- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29431+ csr5 = lp->a->read_csr(ioaddr, CSR5);
29432+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29433 } else {
29434- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29435+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29436 pcnet32_restart(dev, CSR0_NORMAL);
29437 netif_wake_queue(dev);
29438 }
29439@@ -2655,8 +2655,8 @@ static int mdio_read(struct net_device *
29440 if (!lp->mii)
29441 return 0;
29442
29443- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29444- val_out = lp->a.read_bcr(ioaddr, 34);
29445+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29446+ val_out = lp->a->read_bcr(ioaddr, 34);
29447
29448 return val_out;
29449 }
29450@@ -2670,8 +2670,8 @@ static void mdio_write(struct net_device
29451 if (!lp->mii)
29452 return;
29453
29454- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29455- lp->a.write_bcr(ioaddr, 34, val);
29456+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29457+ lp->a->write_bcr(ioaddr, 34, val);
29458 }
29459
29460 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
29461@@ -2748,7 +2748,7 @@ static void pcnet32_check_media(struct n
29462 curr_link = mii_link_ok(&lp->mii_if);
29463 } else {
29464 ulong ioaddr = dev->base_addr; /* card base I/O address */
29465- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
29466+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
29467 }
29468 if (!curr_link) {
29469 if (prev_link || verbose) {
29470@@ -2771,13 +2771,13 @@ static void pcnet32_check_media(struct n
29471 (ecmd.duplex == DUPLEX_FULL)
29472 ? "full" : "half");
29473 }
29474- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
29475+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
29476 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
29477 if (lp->mii_if.full_duplex)
29478 bcr9 |= (1 << 0);
29479 else
29480 bcr9 &= ~(1 << 0);
29481- lp->a.write_bcr(dev->base_addr, 9, bcr9);
29482+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
29483 }
29484 } else {
29485 netif_info(lp, link, dev, "link up\n");
29486diff -urNp linux-3.0.4/drivers/net/ppp_generic.c linux-3.0.4/drivers/net/ppp_generic.c
29487--- linux-3.0.4/drivers/net/ppp_generic.c 2011-07-21 22:17:23.000000000 -0400
29488+++ linux-3.0.4/drivers/net/ppp_generic.c 2011-08-23 21:47:55.000000000 -0400
29489@@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, st
29490 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
29491 struct ppp_stats stats;
29492 struct ppp_comp_stats cstats;
29493- char *vers;
29494
29495 switch (cmd) {
29496 case SIOCGPPPSTATS:
29497@@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, st
29498 break;
29499
29500 case SIOCGPPPVER:
29501- vers = PPP_VERSION;
29502- if (copy_to_user(addr, vers, strlen(vers) + 1))
29503+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
29504 break;
29505 err = 0;
29506 break;
29507diff -urNp linux-3.0.4/drivers/net/r8169.c linux-3.0.4/drivers/net/r8169.c
29508--- linux-3.0.4/drivers/net/r8169.c 2011-08-23 21:44:40.000000000 -0400
29509+++ linux-3.0.4/drivers/net/r8169.c 2011-08-23 21:47:55.000000000 -0400
29510@@ -645,12 +645,12 @@ struct rtl8169_private {
29511 struct mdio_ops {
29512 void (*write)(void __iomem *, int, int);
29513 int (*read)(void __iomem *, int);
29514- } mdio_ops;
29515+ } __no_const mdio_ops;
29516
29517 struct pll_power_ops {
29518 void (*down)(struct rtl8169_private *);
29519 void (*up)(struct rtl8169_private *);
29520- } pll_power_ops;
29521+ } __no_const pll_power_ops;
29522
29523 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
29524 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
29525diff -urNp linux-3.0.4/drivers/net/tg3.h linux-3.0.4/drivers/net/tg3.h
29526--- linux-3.0.4/drivers/net/tg3.h 2011-07-21 22:17:23.000000000 -0400
29527+++ linux-3.0.4/drivers/net/tg3.h 2011-08-23 21:47:55.000000000 -0400
29528@@ -134,6 +134,7 @@
29529 #define CHIPREV_ID_5750_A0 0x4000
29530 #define CHIPREV_ID_5750_A1 0x4001
29531 #define CHIPREV_ID_5750_A3 0x4003
29532+#define CHIPREV_ID_5750_C1 0x4201
29533 #define CHIPREV_ID_5750_C2 0x4202
29534 #define CHIPREV_ID_5752_A0_HW 0x5000
29535 #define CHIPREV_ID_5752_A0 0x6000
29536diff -urNp linux-3.0.4/drivers/net/tokenring/abyss.c linux-3.0.4/drivers/net/tokenring/abyss.c
29537--- linux-3.0.4/drivers/net/tokenring/abyss.c 2011-07-21 22:17:23.000000000 -0400
29538+++ linux-3.0.4/drivers/net/tokenring/abyss.c 2011-08-23 21:47:55.000000000 -0400
29539@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
29540
29541 static int __init abyss_init (void)
29542 {
29543- abyss_netdev_ops = tms380tr_netdev_ops;
29544+ pax_open_kernel();
29545+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29546
29547- abyss_netdev_ops.ndo_open = abyss_open;
29548- abyss_netdev_ops.ndo_stop = abyss_close;
29549+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
29550+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
29551+ pax_close_kernel();
29552
29553 return pci_register_driver(&abyss_driver);
29554 }
29555diff -urNp linux-3.0.4/drivers/net/tokenring/madgemc.c linux-3.0.4/drivers/net/tokenring/madgemc.c
29556--- linux-3.0.4/drivers/net/tokenring/madgemc.c 2011-07-21 22:17:23.000000000 -0400
29557+++ linux-3.0.4/drivers/net/tokenring/madgemc.c 2011-08-23 21:47:55.000000000 -0400
29558@@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver
29559
29560 static int __init madgemc_init (void)
29561 {
29562- madgemc_netdev_ops = tms380tr_netdev_ops;
29563- madgemc_netdev_ops.ndo_open = madgemc_open;
29564- madgemc_netdev_ops.ndo_stop = madgemc_close;
29565+ pax_open_kernel();
29566+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29567+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
29568+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
29569+ pax_close_kernel();
29570
29571 return mca_register_driver (&madgemc_driver);
29572 }
29573diff -urNp linux-3.0.4/drivers/net/tokenring/proteon.c linux-3.0.4/drivers/net/tokenring/proteon.c
29574--- linux-3.0.4/drivers/net/tokenring/proteon.c 2011-07-21 22:17:23.000000000 -0400
29575+++ linux-3.0.4/drivers/net/tokenring/proteon.c 2011-08-23 21:47:55.000000000 -0400
29576@@ -353,9 +353,11 @@ static int __init proteon_init(void)
29577 struct platform_device *pdev;
29578 int i, num = 0, err = 0;
29579
29580- proteon_netdev_ops = tms380tr_netdev_ops;
29581- proteon_netdev_ops.ndo_open = proteon_open;
29582- proteon_netdev_ops.ndo_stop = tms380tr_close;
29583+ pax_open_kernel();
29584+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29585+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
29586+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
29587+ pax_close_kernel();
29588
29589 err = platform_driver_register(&proteon_driver);
29590 if (err)
29591diff -urNp linux-3.0.4/drivers/net/tokenring/skisa.c linux-3.0.4/drivers/net/tokenring/skisa.c
29592--- linux-3.0.4/drivers/net/tokenring/skisa.c 2011-07-21 22:17:23.000000000 -0400
29593+++ linux-3.0.4/drivers/net/tokenring/skisa.c 2011-08-23 21:47:55.000000000 -0400
29594@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
29595 struct platform_device *pdev;
29596 int i, num = 0, err = 0;
29597
29598- sk_isa_netdev_ops = tms380tr_netdev_ops;
29599- sk_isa_netdev_ops.ndo_open = sk_isa_open;
29600- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
29601+ pax_open_kernel();
29602+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29603+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
29604+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
29605+ pax_close_kernel();
29606
29607 err = platform_driver_register(&sk_isa_driver);
29608 if (err)
29609diff -urNp linux-3.0.4/drivers/net/tulip/de2104x.c linux-3.0.4/drivers/net/tulip/de2104x.c
29610--- linux-3.0.4/drivers/net/tulip/de2104x.c 2011-07-21 22:17:23.000000000 -0400
29611+++ linux-3.0.4/drivers/net/tulip/de2104x.c 2011-08-23 21:48:14.000000000 -0400
29612@@ -1794,6 +1794,8 @@ static void __devinit de21041_get_srom_i
29613 struct de_srom_info_leaf *il;
29614 void *bufp;
29615
29616+ pax_track_stack();
29617+
29618 /* download entire eeprom */
29619 for (i = 0; i < DE_EEPROM_WORDS; i++)
29620 ((__le16 *)ee_data)[i] =
29621diff -urNp linux-3.0.4/drivers/net/tulip/de4x5.c linux-3.0.4/drivers/net/tulip/de4x5.c
29622--- linux-3.0.4/drivers/net/tulip/de4x5.c 2011-07-21 22:17:23.000000000 -0400
29623+++ linux-3.0.4/drivers/net/tulip/de4x5.c 2011-08-23 21:47:55.000000000 -0400
29624@@ -5401,7 +5401,7 @@ de4x5_ioctl(struct net_device *dev, stru
29625 for (i=0; i<ETH_ALEN; i++) {
29626 tmp.addr[i] = dev->dev_addr[i];
29627 }
29628- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
29629+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
29630 break;
29631
29632 case DE4X5_SET_HWADDR: /* Set the hardware address */
29633@@ -5441,7 +5441,7 @@ de4x5_ioctl(struct net_device *dev, stru
29634 spin_lock_irqsave(&lp->lock, flags);
29635 memcpy(&statbuf, &lp->pktStats, ioc->len);
29636 spin_unlock_irqrestore(&lp->lock, flags);
29637- if (copy_to_user(ioc->data, &statbuf, ioc->len))
29638+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
29639 return -EFAULT;
29640 break;
29641 }
29642diff -urNp linux-3.0.4/drivers/net/usb/hso.c linux-3.0.4/drivers/net/usb/hso.c
29643--- linux-3.0.4/drivers/net/usb/hso.c 2011-07-21 22:17:23.000000000 -0400
29644+++ linux-3.0.4/drivers/net/usb/hso.c 2011-08-23 21:47:55.000000000 -0400
29645@@ -71,7 +71,7 @@
29646 #include <asm/byteorder.h>
29647 #include <linux/serial_core.h>
29648 #include <linux/serial.h>
29649-
29650+#include <asm/local.h>
29651
29652 #define MOD_AUTHOR "Option Wireless"
29653 #define MOD_DESCRIPTION "USB High Speed Option driver"
29654@@ -257,7 +257,7 @@ struct hso_serial {
29655
29656 /* from usb_serial_port */
29657 struct tty_struct *tty;
29658- int open_count;
29659+ local_t open_count;
29660 spinlock_t serial_lock;
29661
29662 int (*write_data) (struct hso_serial *serial);
29663@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_
29664 struct urb *urb;
29665
29666 urb = serial->rx_urb[0];
29667- if (serial->open_count > 0) {
29668+ if (local_read(&serial->open_count) > 0) {
29669 count = put_rxbuf_data(urb, serial);
29670 if (count == -1)
29671 return;
29672@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_cal
29673 DUMP1(urb->transfer_buffer, urb->actual_length);
29674
29675 /* Anyone listening? */
29676- if (serial->open_count == 0)
29677+ if (local_read(&serial->open_count) == 0)
29678 return;
29679
29680 if (status == 0) {
29681@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
29682 spin_unlock_irq(&serial->serial_lock);
29683
29684 /* check for port already opened, if not set the termios */
29685- serial->open_count++;
29686- if (serial->open_count == 1) {
29687+ if (local_inc_return(&serial->open_count) == 1) {
29688 serial->rx_state = RX_IDLE;
29689 /* Force default termio settings */
29690 _hso_serial_set_termios(tty, NULL);
29691@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_st
29692 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
29693 if (result) {
29694 hso_stop_serial_device(serial->parent);
29695- serial->open_count--;
29696+ local_dec(&serial->open_count);
29697 kref_put(&serial->parent->ref, hso_serial_ref_free);
29698 }
29699 } else {
29700@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_
29701
29702 /* reset the rts and dtr */
29703 /* do the actual close */
29704- serial->open_count--;
29705+ local_dec(&serial->open_count);
29706
29707- if (serial->open_count <= 0) {
29708- serial->open_count = 0;
29709+ if (local_read(&serial->open_count) <= 0) {
29710+ local_set(&serial->open_count, 0);
29711 spin_lock_irq(&serial->serial_lock);
29712 if (serial->tty == tty) {
29713 serial->tty->driver_data = NULL;
29714@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struc
29715
29716 /* the actual setup */
29717 spin_lock_irqsave(&serial->serial_lock, flags);
29718- if (serial->open_count)
29719+ if (local_read(&serial->open_count))
29720 _hso_serial_set_termios(tty, old);
29721 else
29722 tty->termios = old;
29723@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *ur
29724 D1("Pending read interrupt on port %d\n", i);
29725 spin_lock(&serial->serial_lock);
29726 if (serial->rx_state == RX_IDLE &&
29727- serial->open_count > 0) {
29728+ local_read(&serial->open_count) > 0) {
29729 /* Setup and send a ctrl req read on
29730 * port i */
29731 if (!serial->rx_urb_filled[0]) {
29732@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interfa
29733 /* Start all serial ports */
29734 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
29735 if (serial_table[i] && (serial_table[i]->interface == iface)) {
29736- if (dev2ser(serial_table[i])->open_count) {
29737+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
29738 result =
29739 hso_start_serial_device(serial_table[i], GFP_NOIO);
29740 hso_kick_transmit(dev2ser(serial_table[i]));
29741diff -urNp linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c
29742--- linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-07-21 22:17:23.000000000 -0400
29743+++ linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-08-23 21:47:55.000000000 -0400
29744@@ -594,8 +594,7 @@ vmxnet3_set_rss_indir(struct net_device
29745 * Return with error code if any of the queue indices
29746 * is out of range
29747 */
29748- if (p->ring_index[i] < 0 ||
29749- p->ring_index[i] >= adapter->num_rx_queues)
29750+ if (p->ring_index[i] >= adapter->num_rx_queues)
29751 return -EINVAL;
29752 }
29753
29754diff -urNp linux-3.0.4/drivers/net/vxge/vxge-config.h linux-3.0.4/drivers/net/vxge/vxge-config.h
29755--- linux-3.0.4/drivers/net/vxge/vxge-config.h 2011-07-21 22:17:23.000000000 -0400
29756+++ linux-3.0.4/drivers/net/vxge/vxge-config.h 2011-08-23 21:47:55.000000000 -0400
29757@@ -512,7 +512,7 @@ struct vxge_hw_uld_cbs {
29758 void (*link_down)(struct __vxge_hw_device *devh);
29759 void (*crit_err)(struct __vxge_hw_device *devh,
29760 enum vxge_hw_event type, u64 ext_data);
29761-};
29762+} __no_const;
29763
29764 /*
29765 * struct __vxge_hw_blockpool_entry - Block private data structure
29766diff -urNp linux-3.0.4/drivers/net/vxge/vxge-main.c linux-3.0.4/drivers/net/vxge/vxge-main.c
29767--- linux-3.0.4/drivers/net/vxge/vxge-main.c 2011-07-21 22:17:23.000000000 -0400
29768+++ linux-3.0.4/drivers/net/vxge/vxge-main.c 2011-08-23 21:48:14.000000000 -0400
29769@@ -98,6 +98,8 @@ static inline void VXGE_COMPLETE_VPATH_T
29770 struct sk_buff *completed[NR_SKB_COMPLETED];
29771 int more;
29772
29773+ pax_track_stack();
29774+
29775 do {
29776 more = 0;
29777 skb_ptr = completed;
29778@@ -1920,6 +1922,8 @@ static enum vxge_hw_status vxge_rth_conf
29779 u8 mtable[256] = {0}; /* CPU to vpath mapping */
29780 int index;
29781
29782+ pax_track_stack();
29783+
29784 /*
29785 * Filling
29786 * - itable with bucket numbers
29787diff -urNp linux-3.0.4/drivers/net/vxge/vxge-traffic.h linux-3.0.4/drivers/net/vxge/vxge-traffic.h
29788--- linux-3.0.4/drivers/net/vxge/vxge-traffic.h 2011-07-21 22:17:23.000000000 -0400
29789+++ linux-3.0.4/drivers/net/vxge/vxge-traffic.h 2011-08-23 21:47:55.000000000 -0400
29790@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
29791 struct vxge_hw_mempool_dma *dma_object,
29792 u32 index,
29793 u32 is_last);
29794-};
29795+} __no_const;
29796
29797 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
29798 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
29799diff -urNp linux-3.0.4/drivers/net/wan/cycx_x25.c linux-3.0.4/drivers/net/wan/cycx_x25.c
29800--- linux-3.0.4/drivers/net/wan/cycx_x25.c 2011-07-21 22:17:23.000000000 -0400
29801+++ linux-3.0.4/drivers/net/wan/cycx_x25.c 2011-08-23 21:48:14.000000000 -0400
29802@@ -1018,6 +1018,8 @@ static void hex_dump(char *msg, unsigned
29803 unsigned char hex[1024],
29804 * phex = hex;
29805
29806+ pax_track_stack();
29807+
29808 if (len >= (sizeof(hex) / 2))
29809 len = (sizeof(hex) / 2) - 1;
29810
29811diff -urNp linux-3.0.4/drivers/net/wan/hdlc_x25.c linux-3.0.4/drivers/net/wan/hdlc_x25.c
29812--- linux-3.0.4/drivers/net/wan/hdlc_x25.c 2011-07-21 22:17:23.000000000 -0400
29813+++ linux-3.0.4/drivers/net/wan/hdlc_x25.c 2011-08-23 21:47:55.000000000 -0400
29814@@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
29815
29816 static int x25_open(struct net_device *dev)
29817 {
29818- struct lapb_register_struct cb;
29819+ static struct lapb_register_struct cb = {
29820+ .connect_confirmation = x25_connected,
29821+ .connect_indication = x25_connected,
29822+ .disconnect_confirmation = x25_disconnected,
29823+ .disconnect_indication = x25_disconnected,
29824+ .data_indication = x25_data_indication,
29825+ .data_transmit = x25_data_transmit
29826+ };
29827 int result;
29828
29829- cb.connect_confirmation = x25_connected;
29830- cb.connect_indication = x25_connected;
29831- cb.disconnect_confirmation = x25_disconnected;
29832- cb.disconnect_indication = x25_disconnected;
29833- cb.data_indication = x25_data_indication;
29834- cb.data_transmit = x25_data_transmit;
29835-
29836 result = lapb_register(dev, &cb);
29837 if (result != LAPB_OK)
29838 return result;
29839diff -urNp linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c
29840--- linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c 2011-07-21 22:17:23.000000000 -0400
29841+++ linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c 2011-08-23 21:48:14.000000000 -0400
29842@@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
29843 int do_autopm = 1;
29844 DECLARE_COMPLETION_ONSTACK(notif_completion);
29845
29846+ pax_track_stack();
29847+
29848 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
29849 i2400m, ack, ack_size);
29850 BUG_ON(_ack == i2400m->bm_ack_buf);
29851diff -urNp linux-3.0.4/drivers/net/wireless/airo.c linux-3.0.4/drivers/net/wireless/airo.c
29852--- linux-3.0.4/drivers/net/wireless/airo.c 2011-08-23 21:44:40.000000000 -0400
29853+++ linux-3.0.4/drivers/net/wireless/airo.c 2011-08-23 21:48:14.000000000 -0400
29854@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
29855 BSSListElement * loop_net;
29856 BSSListElement * tmp_net;
29857
29858+ pax_track_stack();
29859+
29860 /* Blow away current list of scan results */
29861 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
29862 list_move_tail (&loop_net->list, &ai->network_free_list);
29863@@ -3794,6 +3796,8 @@ static u16 setup_card(struct airo_info *
29864 WepKeyRid wkr;
29865 int rc;
29866
29867+ pax_track_stack();
29868+
29869 memset( &mySsid, 0, sizeof( mySsid ) );
29870 kfree (ai->flash);
29871 ai->flash = NULL;
29872@@ -4753,6 +4757,8 @@ static int proc_stats_rid_open( struct i
29873 __le32 *vals = stats.vals;
29874 int len;
29875
29876+ pax_track_stack();
29877+
29878 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
29879 return -ENOMEM;
29880 data = file->private_data;
29881@@ -5476,6 +5482,8 @@ static int proc_BSSList_open( struct ino
29882 /* If doLoseSync is not 1, we won't do a Lose Sync */
29883 int doLoseSync = -1;
29884
29885+ pax_track_stack();
29886+
29887 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
29888 return -ENOMEM;
29889 data = file->private_data;
29890@@ -7181,6 +7189,8 @@ static int airo_get_aplist(struct net_de
29891 int i;
29892 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
29893
29894+ pax_track_stack();
29895+
29896 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
29897 if (!qual)
29898 return -ENOMEM;
29899@@ -7741,6 +7751,8 @@ static void airo_read_wireless_stats(str
29900 CapabilityRid cap_rid;
29901 __le32 *vals = stats_rid.vals;
29902
29903+ pax_track_stack();
29904+
29905 /* Get stats out of the card */
29906 clear_bit(JOB_WSTATS, &local->jobs);
29907 if (local->power.event) {
29908diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c
29909--- linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c 2011-07-21 22:17:23.000000000 -0400
29910+++ linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c 2011-08-23 21:48:14.000000000 -0400
29911@@ -204,6 +204,8 @@ static ssize_t read_file_beacon(struct f
29912 unsigned int v;
29913 u64 tsf;
29914
29915+ pax_track_stack();
29916+
29917 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
29918 len += snprintf(buf+len, sizeof(buf)-len,
29919 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
29920@@ -323,6 +325,8 @@ static ssize_t read_file_debug(struct fi
29921 unsigned int len = 0;
29922 unsigned int i;
29923
29924+ pax_track_stack();
29925+
29926 len += snprintf(buf+len, sizeof(buf)-len,
29927 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
29928
29929@@ -384,6 +388,8 @@ static ssize_t read_file_antenna(struct
29930 unsigned int i;
29931 unsigned int v;
29932
29933+ pax_track_stack();
29934+
29935 len += snprintf(buf+len, sizeof(buf)-len, "antenna mode\t%d\n",
29936 sc->ah->ah_ant_mode);
29937 len += snprintf(buf+len, sizeof(buf)-len, "default antenna\t%d\n",
29938@@ -494,6 +500,8 @@ static ssize_t read_file_misc(struct fil
29939 unsigned int len = 0;
29940 u32 filt = ath5k_hw_get_rx_filter(sc->ah);
29941
29942+ pax_track_stack();
29943+
29944 len += snprintf(buf+len, sizeof(buf)-len, "bssid-mask: %pM\n",
29945 sc->bssidmask);
29946 len += snprintf(buf+len, sizeof(buf)-len, "filter-flags: 0x%x ",
29947@@ -550,6 +558,8 @@ static ssize_t read_file_frameerrors(str
29948 unsigned int len = 0;
29949 int i;
29950
29951+ pax_track_stack();
29952+
29953 len += snprintf(buf+len, sizeof(buf)-len,
29954 "RX\n---------------------\n");
29955 len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%u\t(%u%%)\n",
29956@@ -667,6 +677,8 @@ static ssize_t read_file_ani(struct file
29957 char buf[700];
29958 unsigned int len = 0;
29959
29960+ pax_track_stack();
29961+
29962 len += snprintf(buf+len, sizeof(buf)-len,
29963 "HW has PHY error counters:\t%s\n",
29964 sc->ah->ah_capabilities.cap_has_phyerr_counters ?
29965@@ -827,6 +839,8 @@ static ssize_t read_file_queue(struct fi
29966 struct ath5k_buf *bf, *bf0;
29967 int i, n;
29968
29969+ pax_track_stack();
29970+
29971 len += snprintf(buf+len, sizeof(buf)-len,
29972 "available txbuffers: %d\n", sc->txbuf_len);
29973
29974diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c
29975--- linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-07-21 22:17:23.000000000 -0400
29976+++ linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-08-23 21:48:14.000000000 -0400
29977@@ -757,6 +757,8 @@ static void ar9003_hw_tx_iq_cal_post_pro
29978 int i, im, j;
29979 int nmeasurement;
29980
29981+ pax_track_stack();
29982+
29983 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
29984 if (ah->txchainmask & (1 << i))
29985 num_chains++;
29986diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
29987--- linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-07-21 22:17:23.000000000 -0400
29988+++ linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-08-23 21:48:14.000000000 -0400
29989@@ -356,6 +356,8 @@ static bool create_pa_curve(u32 *data_L,
29990 int theta_low_bin = 0;
29991 int i;
29992
29993+ pax_track_stack();
29994+
29995 /* disregard any bin that contains <= 16 samples */
29996 thresh_accum_cnt = 16;
29997 scale_factor = 5;
29998diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c
29999--- linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c 2011-07-21 22:17:23.000000000 -0400
30000+++ linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c 2011-08-23 21:48:14.000000000 -0400
30001@@ -337,6 +337,8 @@ static ssize_t read_file_interrupt(struc
30002 char buf[512];
30003 unsigned int len = 0;
30004
30005+ pax_track_stack();
30006+
30007 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
30008 len += snprintf(buf + len, sizeof(buf) - len,
30009 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
30010@@ -427,6 +429,8 @@ static ssize_t read_file_wiphy(struct fi
30011 u8 addr[ETH_ALEN];
30012 u32 tmp;
30013
30014+ pax_track_stack();
30015+
30016 len += snprintf(buf + len, sizeof(buf) - len,
30017 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
30018 wiphy_name(sc->hw->wiphy),
30019diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
30020--- linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-07-21 22:17:23.000000000 -0400
30021+++ linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-08-23 21:48:14.000000000 -0400
30022@@ -31,6 +31,8 @@ static ssize_t read_file_tgt_int_stats(s
30023 unsigned int len = 0;
30024 int ret = 0;
30025
30026+ pax_track_stack();
30027+
30028 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30029
30030 ath9k_htc_ps_wakeup(priv);
30031@@ -89,6 +91,8 @@ static ssize_t read_file_tgt_tx_stats(st
30032 unsigned int len = 0;
30033 int ret = 0;
30034
30035+ pax_track_stack();
30036+
30037 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30038
30039 ath9k_htc_ps_wakeup(priv);
30040@@ -159,6 +163,8 @@ static ssize_t read_file_tgt_rx_stats(st
30041 unsigned int len = 0;
30042 int ret = 0;
30043
30044+ pax_track_stack();
30045+
30046 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30047
30048 ath9k_htc_ps_wakeup(priv);
30049@@ -203,6 +209,8 @@ static ssize_t read_file_xmit(struct fil
30050 char buf[512];
30051 unsigned int len = 0;
30052
30053+ pax_track_stack();
30054+
30055 len += snprintf(buf + len, sizeof(buf) - len,
30056 "%20s : %10u\n", "Buffers queued",
30057 priv->debug.tx_stats.buf_queued);
30058@@ -376,6 +384,8 @@ static ssize_t read_file_slot(struct fil
30059 char buf[512];
30060 unsigned int len = 0;
30061
30062+ pax_track_stack();
30063+
30064 spin_lock_bh(&priv->tx.tx_lock);
30065
30066 len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
30067@@ -411,6 +421,8 @@ static ssize_t read_file_queue(struct fi
30068 char buf[512];
30069 unsigned int len = 0;
30070
30071+ pax_track_stack();
30072+
30073 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
30074 "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
30075
30076diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h
30077--- linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h 2011-08-23 21:44:40.000000000 -0400
30078+++ linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h 2011-08-23 21:47:55.000000000 -0400
30079@@ -585,7 +585,7 @@ struct ath_hw_private_ops {
30080
30081 /* ANI */
30082 void (*ani_cache_ini_regs)(struct ath_hw *ah);
30083-};
30084+} __no_const;
30085
30086 /**
30087 * struct ath_hw_ops - callbacks used by hardware code and driver code
30088@@ -637,7 +637,7 @@ struct ath_hw_ops {
30089 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
30090 struct ath_hw_antcomb_conf *antconf);
30091
30092-};
30093+} __no_const;
30094
30095 struct ath_nf_limits {
30096 s16 max;
30097@@ -650,7 +650,7 @@ struct ath_nf_limits {
30098 #define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
30099
30100 struct ath_hw {
30101- struct ath_ops reg_ops;
30102+ ath_ops_no_const reg_ops;
30103
30104 struct ieee80211_hw *hw;
30105 struct ath_common common;
30106diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath.h linux-3.0.4/drivers/net/wireless/ath/ath.h
30107--- linux-3.0.4/drivers/net/wireless/ath/ath.h 2011-07-21 22:17:23.000000000 -0400
30108+++ linux-3.0.4/drivers/net/wireless/ath/ath.h 2011-08-23 21:47:55.000000000 -0400
30109@@ -121,6 +121,7 @@ struct ath_ops {
30110 void (*write_flush) (void *);
30111 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
30112 };
30113+typedef struct ath_ops __no_const ath_ops_no_const;
30114
30115 struct ath_common;
30116 struct ath_bus_ops;
30117diff -urNp linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c
30118--- linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c 2011-07-21 22:17:23.000000000 -0400
30119+++ linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c 2011-08-23 21:48:14.000000000 -0400
30120@@ -2100,6 +2100,8 @@ static int ipw2100_set_essid(struct ipw2
30121 int err;
30122 DECLARE_SSID_BUF(ssid);
30123
30124+ pax_track_stack();
30125+
30126 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
30127
30128 if (ssid_len)
30129@@ -5449,6 +5451,8 @@ static int ipw2100_set_key(struct ipw210
30130 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
30131 int err;
30132
30133+ pax_track_stack();
30134+
30135 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
30136 idx, keylen, len);
30137
30138diff -urNp linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c
30139--- linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-07-21 22:17:23.000000000 -0400
30140+++ linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-08-23 21:48:14.000000000 -0400
30141@@ -1565,6 +1565,8 @@ static void libipw_process_probe_respons
30142 unsigned long flags;
30143 DECLARE_SSID_BUF(ssid);
30144
30145+ pax_track_stack();
30146+
30147 LIBIPW_DEBUG_SCAN("'%s' (%pM"
30148 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
30149 print_ssid(ssid, info_element->data, info_element->len),
30150diff -urNp linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c
30151--- linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-07-21 22:17:23.000000000 -0400
30152+++ linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-08-23 21:47:55.000000000 -0400
30153@@ -3962,7 +3962,9 @@ static int iwl3945_pci_probe(struct pci_
30154 */
30155 if (iwl3945_mod_params.disable_hw_scan) {
30156 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
30157- iwl3945_hw_ops.hw_scan = NULL;
30158+ pax_open_kernel();
30159+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
30160+ pax_close_kernel();
30161 }
30162
30163 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
30164diff -urNp linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
30165--- linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-07-21 22:17:23.000000000 -0400
30166+++ linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-08-23 21:48:14.000000000 -0400
30167@@ -910,6 +910,8 @@ static void rs_tx_status(void *priv_r, s
30168 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
30169 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
30170
30171+ pax_track_stack();
30172+
30173 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
30174
30175 /* Treat uninitialized rate scaling data same as non-existing. */
30176@@ -2918,6 +2920,8 @@ static void rs_fill_link_cmd(struct iwl_
30177 container_of(lq_sta, struct iwl_station_priv, lq_sta);
30178 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
30179
30180+ pax_track_stack();
30181+
30182 /* Override starting rate (index 0) if needed for debug purposes */
30183 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
30184
30185diff -urNp linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c
30186--- linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-07-21 22:17:23.000000000 -0400
30187+++ linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-08-23 21:48:14.000000000 -0400
30188@@ -548,6 +548,8 @@ static ssize_t iwl_dbgfs_status_read(str
30189 int pos = 0;
30190 const size_t bufsz = sizeof(buf);
30191
30192+ pax_track_stack();
30193+
30194 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
30195 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
30196 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
30197@@ -680,6 +682,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
30198 char buf[256 * NUM_IWL_RXON_CTX];
30199 const size_t bufsz = sizeof(buf);
30200
30201+ pax_track_stack();
30202+
30203 for_each_context(priv, ctx) {
30204 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
30205 ctx->ctxid);
30206diff -urNp linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h
30207--- linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-07-21 22:17:23.000000000 -0400
30208+++ linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-08-23 21:47:55.000000000 -0400
30209@@ -68,8 +68,8 @@ do {
30210 } while (0)
30211
30212 #else
30213-#define IWL_DEBUG(__priv, level, fmt, args...)
30214-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
30215+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
30216+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
30217 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
30218 const void *p, u32 len)
30219 {}
30220diff -urNp linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c
30221--- linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-07-21 22:17:23.000000000 -0400
30222+++ linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-08-23 21:48:14.000000000 -0400
30223@@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
30224 int buf_len = 512;
30225 size_t len = 0;
30226
30227+ pax_track_stack();
30228+
30229 if (*ppos != 0)
30230 return 0;
30231 if (count < sizeof(buf))
30232diff -urNp linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c
30233--- linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c 2011-07-21 22:17:23.000000000 -0400
30234+++ linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c 2011-08-23 21:47:55.000000000 -0400
30235@@ -1260,9 +1260,11 @@ static int __init init_mac80211_hwsim(vo
30236 return -EINVAL;
30237
30238 if (fake_hw_scan) {
30239- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30240- mac80211_hwsim_ops.sw_scan_start = NULL;
30241- mac80211_hwsim_ops.sw_scan_complete = NULL;
30242+ pax_open_kernel();
30243+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30244+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
30245+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
30246+ pax_close_kernel();
30247 }
30248
30249 spin_lock_init(&hwsim_radio_lock);
30250diff -urNp linux-3.0.4/drivers/net/wireless/rndis_wlan.c linux-3.0.4/drivers/net/wireless/rndis_wlan.c
30251--- linux-3.0.4/drivers/net/wireless/rndis_wlan.c 2011-07-21 22:17:23.000000000 -0400
30252+++ linux-3.0.4/drivers/net/wireless/rndis_wlan.c 2011-08-23 21:47:55.000000000 -0400
30253@@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbn
30254
30255 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
30256
30257- if (rts_threshold < 0 || rts_threshold > 2347)
30258+ if (rts_threshold > 2347)
30259 rts_threshold = 2347;
30260
30261 tmp = cpu_to_le32(rts_threshold);
30262diff -urNp linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
30263--- linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-07-21 22:17:23.000000000 -0400
30264+++ linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-08-23 21:48:14.000000000 -0400
30265@@ -837,6 +837,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(st
30266 u8 rfpath;
30267 u8 num_total_rfpath = rtlphy->num_total_rfpath;
30268
30269+ pax_track_stack();
30270+
30271 precommoncmdcnt = 0;
30272 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
30273 MAX_PRECMD_CNT,
30274diff -urNp linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h
30275--- linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h 2011-07-21 22:17:23.000000000 -0400
30276+++ linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h 2011-08-23 21:47:55.000000000 -0400
30277@@ -266,7 +266,7 @@ struct wl1251_if_operations {
30278 void (*reset)(struct wl1251 *wl);
30279 void (*enable_irq)(struct wl1251 *wl);
30280 void (*disable_irq)(struct wl1251 *wl);
30281-};
30282+} __no_const;
30283
30284 struct wl1251 {
30285 struct ieee80211_hw *hw;
30286diff -urNp linux-3.0.4/drivers/net/wireless/wl12xx/spi.c linux-3.0.4/drivers/net/wireless/wl12xx/spi.c
30287--- linux-3.0.4/drivers/net/wireless/wl12xx/spi.c 2011-07-21 22:17:23.000000000 -0400
30288+++ linux-3.0.4/drivers/net/wireless/wl12xx/spi.c 2011-08-23 21:48:14.000000000 -0400
30289@@ -280,6 +280,8 @@ static void wl1271_spi_raw_write(struct
30290 u32 chunk_len;
30291 int i;
30292
30293+ pax_track_stack();
30294+
30295 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
30296
30297 spi_message_init(&m);
30298diff -urNp linux-3.0.4/drivers/oprofile/buffer_sync.c linux-3.0.4/drivers/oprofile/buffer_sync.c
30299--- linux-3.0.4/drivers/oprofile/buffer_sync.c 2011-07-21 22:17:23.000000000 -0400
30300+++ linux-3.0.4/drivers/oprofile/buffer_sync.c 2011-08-23 21:47:55.000000000 -0400
30301@@ -343,7 +343,7 @@ static void add_data(struct op_entry *en
30302 if (cookie == NO_COOKIE)
30303 offset = pc;
30304 if (cookie == INVALID_COOKIE) {
30305- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30306+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30307 offset = pc;
30308 }
30309 if (cookie != last_cookie) {
30310@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct
30311 /* add userspace sample */
30312
30313 if (!mm) {
30314- atomic_inc(&oprofile_stats.sample_lost_no_mm);
30315+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
30316 return 0;
30317 }
30318
30319 cookie = lookup_dcookie(mm, s->eip, &offset);
30320
30321 if (cookie == INVALID_COOKIE) {
30322- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30323+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30324 return 0;
30325 }
30326
30327@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
30328 /* ignore backtraces if failed to add a sample */
30329 if (state == sb_bt_start) {
30330 state = sb_bt_ignore;
30331- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
30332+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
30333 }
30334 }
30335 release_mm(mm);
30336diff -urNp linux-3.0.4/drivers/oprofile/event_buffer.c linux-3.0.4/drivers/oprofile/event_buffer.c
30337--- linux-3.0.4/drivers/oprofile/event_buffer.c 2011-07-21 22:17:23.000000000 -0400
30338+++ linux-3.0.4/drivers/oprofile/event_buffer.c 2011-08-23 21:47:55.000000000 -0400
30339@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
30340 }
30341
30342 if (buffer_pos == buffer_size) {
30343- atomic_inc(&oprofile_stats.event_lost_overflow);
30344+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
30345 return;
30346 }
30347
30348diff -urNp linux-3.0.4/drivers/oprofile/oprof.c linux-3.0.4/drivers/oprofile/oprof.c
30349--- linux-3.0.4/drivers/oprofile/oprof.c 2011-07-21 22:17:23.000000000 -0400
30350+++ linux-3.0.4/drivers/oprofile/oprof.c 2011-08-23 21:47:55.000000000 -0400
30351@@ -110,7 +110,7 @@ static void switch_worker(struct work_st
30352 if (oprofile_ops.switch_events())
30353 return;
30354
30355- atomic_inc(&oprofile_stats.multiplex_counter);
30356+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
30357 start_switch_worker();
30358 }
30359
30360diff -urNp linux-3.0.4/drivers/oprofile/oprofilefs.c linux-3.0.4/drivers/oprofile/oprofilefs.c
30361--- linux-3.0.4/drivers/oprofile/oprofilefs.c 2011-07-21 22:17:23.000000000 -0400
30362+++ linux-3.0.4/drivers/oprofile/oprofilefs.c 2011-08-23 21:47:55.000000000 -0400
30363@@ -186,7 +186,7 @@ static const struct file_operations atom
30364
30365
30366 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
30367- char const *name, atomic_t *val)
30368+ char const *name, atomic_unchecked_t *val)
30369 {
30370 return __oprofilefs_create_file(sb, root, name,
30371 &atomic_ro_fops, 0444, val);
30372diff -urNp linux-3.0.4/drivers/oprofile/oprofile_stats.c linux-3.0.4/drivers/oprofile/oprofile_stats.c
30373--- linux-3.0.4/drivers/oprofile/oprofile_stats.c 2011-07-21 22:17:23.000000000 -0400
30374+++ linux-3.0.4/drivers/oprofile/oprofile_stats.c 2011-08-23 21:47:55.000000000 -0400
30375@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
30376 cpu_buf->sample_invalid_eip = 0;
30377 }
30378
30379- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
30380- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
30381- atomic_set(&oprofile_stats.event_lost_overflow, 0);
30382- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
30383- atomic_set(&oprofile_stats.multiplex_counter, 0);
30384+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
30385+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
30386+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
30387+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
30388+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
30389 }
30390
30391
30392diff -urNp linux-3.0.4/drivers/oprofile/oprofile_stats.h linux-3.0.4/drivers/oprofile/oprofile_stats.h
30393--- linux-3.0.4/drivers/oprofile/oprofile_stats.h 2011-07-21 22:17:23.000000000 -0400
30394+++ linux-3.0.4/drivers/oprofile/oprofile_stats.h 2011-08-23 21:47:55.000000000 -0400
30395@@ -13,11 +13,11 @@
30396 #include <asm/atomic.h>
30397
30398 struct oprofile_stat_struct {
30399- atomic_t sample_lost_no_mm;
30400- atomic_t sample_lost_no_mapping;
30401- atomic_t bt_lost_no_mapping;
30402- atomic_t event_lost_overflow;
30403- atomic_t multiplex_counter;
30404+ atomic_unchecked_t sample_lost_no_mm;
30405+ atomic_unchecked_t sample_lost_no_mapping;
30406+ atomic_unchecked_t bt_lost_no_mapping;
30407+ atomic_unchecked_t event_lost_overflow;
30408+ atomic_unchecked_t multiplex_counter;
30409 };
30410
30411 extern struct oprofile_stat_struct oprofile_stats;
30412diff -urNp linux-3.0.4/drivers/parport/procfs.c linux-3.0.4/drivers/parport/procfs.c
30413--- linux-3.0.4/drivers/parport/procfs.c 2011-07-21 22:17:23.000000000 -0400
30414+++ linux-3.0.4/drivers/parport/procfs.c 2011-08-23 21:47:55.000000000 -0400
30415@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
30416
30417 *ppos += len;
30418
30419- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
30420+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
30421 }
30422
30423 #ifdef CONFIG_PARPORT_1284
30424@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
30425
30426 *ppos += len;
30427
30428- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
30429+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
30430 }
30431 #endif /* IEEE1284.3 support. */
30432
30433diff -urNp linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h
30434--- linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h 2011-07-21 22:17:23.000000000 -0400
30435+++ linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h 2011-08-23 21:47:55.000000000 -0400
30436@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
30437 int (*hardware_test) (struct slot* slot, u32 value);
30438 u8 (*get_power) (struct slot* slot);
30439 int (*set_power) (struct slot* slot, int value);
30440-};
30441+} __no_const;
30442
30443 struct cpci_hp_controller {
30444 unsigned int irq;
30445diff -urNp linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c
30446--- linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c 2011-07-21 22:17:23.000000000 -0400
30447+++ linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c 2011-08-23 21:47:55.000000000 -0400
30448@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
30449
30450 void compaq_nvram_init (void __iomem *rom_start)
30451 {
30452+
30453+#ifndef CONFIG_PAX_KERNEXEC
30454 if (rom_start) {
30455 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
30456 }
30457+#endif
30458+
30459 dbg("int15 entry = %p\n", compaq_int15_entry_point);
30460
30461 /* initialize our int15 lock */
30462diff -urNp linux-3.0.4/drivers/pci/pcie/aspm.c linux-3.0.4/drivers/pci/pcie/aspm.c
30463--- linux-3.0.4/drivers/pci/pcie/aspm.c 2011-07-21 22:17:23.000000000 -0400
30464+++ linux-3.0.4/drivers/pci/pcie/aspm.c 2011-08-23 21:47:55.000000000 -0400
30465@@ -27,9 +27,9 @@
30466 #define MODULE_PARAM_PREFIX "pcie_aspm."
30467
30468 /* Note: those are not register definitions */
30469-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
30470-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
30471-#define ASPM_STATE_L1 (4) /* L1 state */
30472+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
30473+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
30474+#define ASPM_STATE_L1 (4U) /* L1 state */
30475 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
30476 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
30477
30478diff -urNp linux-3.0.4/drivers/pci/probe.c linux-3.0.4/drivers/pci/probe.c
30479--- linux-3.0.4/drivers/pci/probe.c 2011-07-21 22:17:23.000000000 -0400
30480+++ linux-3.0.4/drivers/pci/probe.c 2011-08-23 21:47:55.000000000 -0400
30481@@ -129,7 +129,7 @@ int __pci_read_base(struct pci_dev *dev,
30482 u32 l, sz, mask;
30483 u16 orig_cmd;
30484
30485- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
30486+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
30487
30488 if (!dev->mmio_always_on) {
30489 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
30490diff -urNp linux-3.0.4/drivers/pci/proc.c linux-3.0.4/drivers/pci/proc.c
30491--- linux-3.0.4/drivers/pci/proc.c 2011-07-21 22:17:23.000000000 -0400
30492+++ linux-3.0.4/drivers/pci/proc.c 2011-08-23 21:48:14.000000000 -0400
30493@@ -476,7 +476,16 @@ static const struct file_operations proc
30494 static int __init pci_proc_init(void)
30495 {
30496 struct pci_dev *dev = NULL;
30497+
30498+#ifdef CONFIG_GRKERNSEC_PROC_ADD
30499+#ifdef CONFIG_GRKERNSEC_PROC_USER
30500+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
30501+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
30502+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
30503+#endif
30504+#else
30505 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
30506+#endif
30507 proc_create("devices", 0, proc_bus_pci_dir,
30508 &proc_bus_pci_dev_operations);
30509 proc_initialized = 1;
30510diff -urNp linux-3.0.4/drivers/pci/xen-pcifront.c linux-3.0.4/drivers/pci/xen-pcifront.c
30511--- linux-3.0.4/drivers/pci/xen-pcifront.c 2011-07-21 22:17:23.000000000 -0400
30512+++ linux-3.0.4/drivers/pci/xen-pcifront.c 2011-08-23 21:48:14.000000000 -0400
30513@@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_
30514 struct pcifront_sd *sd = bus->sysdata;
30515 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30516
30517+ pax_track_stack();
30518+
30519 if (verbose_request)
30520 dev_info(&pdev->xdev->dev,
30521 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
30522@@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci
30523 struct pcifront_sd *sd = bus->sysdata;
30524 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30525
30526+ pax_track_stack();
30527+
30528 if (verbose_request)
30529 dev_info(&pdev->xdev->dev,
30530 "write dev=%04x:%02x:%02x.%01x - "
30531@@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(stru
30532 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30533 struct msi_desc *entry;
30534
30535+ pax_track_stack();
30536+
30537 if (nvec > SH_INFO_MAX_VEC) {
30538 dev_err(&dev->dev, "too much vector for pci frontend: %x."
30539 " Increase SH_INFO_MAX_VEC.\n", nvec);
30540@@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(st
30541 struct pcifront_sd *sd = dev->bus->sysdata;
30542 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30543
30544+ pax_track_stack();
30545+
30546 err = do_pci_op(pdev, &op);
30547
30548 /* What should do for error ? */
30549@@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struc
30550 struct pcifront_sd *sd = dev->bus->sysdata;
30551 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30552
30553+ pax_track_stack();
30554+
30555 err = do_pci_op(pdev, &op);
30556 if (likely(!err)) {
30557 vector[0] = op.value;
30558diff -urNp linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c
30559--- linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c 2011-07-21 22:17:23.000000000 -0400
30560+++ linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c 2011-08-23 21:47:55.000000000 -0400
30561@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
30562 return 0;
30563 }
30564
30565-void static hotkey_mask_warn_incomplete_mask(void)
30566+static void hotkey_mask_warn_incomplete_mask(void)
30567 {
30568 /* log only what the user can fix... */
30569 const u32 wantedmask = hotkey_driver_mask &
30570diff -urNp linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c
30571--- linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c 2011-07-21 22:17:23.000000000 -0400
30572+++ linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c 2011-08-23 21:47:55.000000000 -0400
30573@@ -59,7 +59,7 @@ do { \
30574 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
30575 } while(0)
30576
30577-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
30578+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
30579 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
30580
30581 /*
30582@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
30583
30584 cpu = get_cpu();
30585 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
30586+
30587+ pax_open_kernel();
30588 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
30589+ pax_close_kernel();
30590
30591 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
30592 spin_lock_irqsave(&pnp_bios_lock, flags);
30593@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
30594 :"memory");
30595 spin_unlock_irqrestore(&pnp_bios_lock, flags);
30596
30597+ pax_open_kernel();
30598 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
30599+ pax_close_kernel();
30600+
30601 put_cpu();
30602
30603 /* If we get here and this is set then the PnP BIOS faulted on us. */
30604@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
30605 return status;
30606 }
30607
30608-void pnpbios_calls_init(union pnp_bios_install_struct *header)
30609+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
30610 {
30611 int i;
30612
30613@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i
30614 pnp_bios_callpoint.offset = header->fields.pm16offset;
30615 pnp_bios_callpoint.segment = PNP_CS16;
30616
30617+ pax_open_kernel();
30618+
30619 for_each_possible_cpu(i) {
30620 struct desc_struct *gdt = get_cpu_gdt_table(i);
30621 if (!gdt)
30622@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i
30623 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
30624 (unsigned long)__va(header->fields.pm16dseg));
30625 }
30626+
30627+ pax_close_kernel();
30628 }
30629diff -urNp linux-3.0.4/drivers/pnp/resource.c linux-3.0.4/drivers/pnp/resource.c
30630--- linux-3.0.4/drivers/pnp/resource.c 2011-07-21 22:17:23.000000000 -0400
30631+++ linux-3.0.4/drivers/pnp/resource.c 2011-08-23 21:47:55.000000000 -0400
30632@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
30633 return 1;
30634
30635 /* check if the resource is valid */
30636- if (*irq < 0 || *irq > 15)
30637+ if (*irq > 15)
30638 return 0;
30639
30640 /* check if the resource is reserved */
30641@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
30642 return 1;
30643
30644 /* check if the resource is valid */
30645- if (*dma < 0 || *dma == 4 || *dma > 7)
30646+ if (*dma == 4 || *dma > 7)
30647 return 0;
30648
30649 /* check if the resource is reserved */
30650diff -urNp linux-3.0.4/drivers/power/bq27x00_battery.c linux-3.0.4/drivers/power/bq27x00_battery.c
30651--- linux-3.0.4/drivers/power/bq27x00_battery.c 2011-07-21 22:17:23.000000000 -0400
30652+++ linux-3.0.4/drivers/power/bq27x00_battery.c 2011-08-23 21:47:55.000000000 -0400
30653@@ -67,7 +67,7 @@
30654 struct bq27x00_device_info;
30655 struct bq27x00_access_methods {
30656 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
30657-};
30658+} __no_const;
30659
30660 enum bq27x00_chip { BQ27000, BQ27500 };
30661
30662diff -urNp linux-3.0.4/drivers/regulator/max8660.c linux-3.0.4/drivers/regulator/max8660.c
30663--- linux-3.0.4/drivers/regulator/max8660.c 2011-07-21 22:17:23.000000000 -0400
30664+++ linux-3.0.4/drivers/regulator/max8660.c 2011-08-23 21:47:55.000000000 -0400
30665@@ -383,8 +383,10 @@ static int __devinit max8660_probe(struc
30666 max8660->shadow_regs[MAX8660_OVER1] = 5;
30667 } else {
30668 /* Otherwise devices can be toggled via software */
30669- max8660_dcdc_ops.enable = max8660_dcdc_enable;
30670- max8660_dcdc_ops.disable = max8660_dcdc_disable;
30671+ pax_open_kernel();
30672+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
30673+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
30674+ pax_close_kernel();
30675 }
30676
30677 /*
30678diff -urNp linux-3.0.4/drivers/regulator/mc13892-regulator.c linux-3.0.4/drivers/regulator/mc13892-regulator.c
30679--- linux-3.0.4/drivers/regulator/mc13892-regulator.c 2011-07-21 22:17:23.000000000 -0400
30680+++ linux-3.0.4/drivers/regulator/mc13892-regulator.c 2011-08-23 21:47:55.000000000 -0400
30681@@ -564,10 +564,12 @@ static int __devinit mc13892_regulator_p
30682 }
30683 mc13xxx_unlock(mc13892);
30684
30685- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
30686+ pax_open_kernel();
30687+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
30688 = mc13892_vcam_set_mode;
30689- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
30690+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
30691 = mc13892_vcam_get_mode;
30692+ pax_close_kernel();
30693 for (i = 0; i < pdata->num_regulators; i++) {
30694 init_data = &pdata->regulators[i];
30695 priv->regulators[i] = regulator_register(
30696diff -urNp linux-3.0.4/drivers/rtc/rtc-dev.c linux-3.0.4/drivers/rtc/rtc-dev.c
30697--- linux-3.0.4/drivers/rtc/rtc-dev.c 2011-07-21 22:17:23.000000000 -0400
30698+++ linux-3.0.4/drivers/rtc/rtc-dev.c 2011-08-23 21:48:14.000000000 -0400
30699@@ -14,6 +14,7 @@
30700 #include <linux/module.h>
30701 #include <linux/rtc.h>
30702 #include <linux/sched.h>
30703+#include <linux/grsecurity.h>
30704 #include "rtc-core.h"
30705
30706 static dev_t rtc_devt;
30707@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *f
30708 if (copy_from_user(&tm, uarg, sizeof(tm)))
30709 return -EFAULT;
30710
30711+ gr_log_timechange();
30712+
30713 return rtc_set_time(rtc, &tm);
30714
30715 case RTC_PIE_ON:
30716diff -urNp linux-3.0.4/drivers/scsi/aacraid/aacraid.h linux-3.0.4/drivers/scsi/aacraid/aacraid.h
30717--- linux-3.0.4/drivers/scsi/aacraid/aacraid.h 2011-07-21 22:17:23.000000000 -0400
30718+++ linux-3.0.4/drivers/scsi/aacraid/aacraid.h 2011-08-23 21:47:55.000000000 -0400
30719@@ -492,7 +492,7 @@ struct adapter_ops
30720 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
30721 /* Administrative operations */
30722 int (*adapter_comm)(struct aac_dev * dev, int comm);
30723-};
30724+} __no_const;
30725
30726 /*
30727 * Define which interrupt handler needs to be installed
30728diff -urNp linux-3.0.4/drivers/scsi/aacraid/commctrl.c linux-3.0.4/drivers/scsi/aacraid/commctrl.c
30729--- linux-3.0.4/drivers/scsi/aacraid/commctrl.c 2011-07-21 22:17:23.000000000 -0400
30730+++ linux-3.0.4/drivers/scsi/aacraid/commctrl.c 2011-08-23 21:48:14.000000000 -0400
30731@@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_d
30732 u32 actual_fibsize64, actual_fibsize = 0;
30733 int i;
30734
30735+ pax_track_stack();
30736
30737 if (dev->in_reset) {
30738 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
30739diff -urNp linux-3.0.4/drivers/scsi/bfa/bfad.c linux-3.0.4/drivers/scsi/bfa/bfad.c
30740--- linux-3.0.4/drivers/scsi/bfa/bfad.c 2011-07-21 22:17:23.000000000 -0400
30741+++ linux-3.0.4/drivers/scsi/bfa/bfad.c 2011-08-23 21:48:14.000000000 -0400
30742@@ -1032,6 +1032,8 @@ bfad_start_ops(struct bfad_s *bfad) {
30743 struct bfad_vport_s *vport, *vport_new;
30744 struct bfa_fcs_driver_info_s driver_info;
30745
30746+ pax_track_stack();
30747+
30748 /* Fill the driver_info info to fcs*/
30749 memset(&driver_info, 0, sizeof(driver_info));
30750 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
30751diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c
30752--- linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c 2011-07-21 22:17:23.000000000 -0400
30753+++ linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c 2011-08-23 21:48:14.000000000 -0400
30754@@ -1559,6 +1559,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struc
30755 u16 len, count;
30756 u16 templen;
30757
30758+ pax_track_stack();
30759+
30760 /*
30761 * get hba attributes
30762 */
30763@@ -1836,6 +1838,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(
30764 u8 count = 0;
30765 u16 templen;
30766
30767+ pax_track_stack();
30768+
30769 /*
30770 * get port attributes
30771 */
30772diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c
30773--- linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c 2011-07-21 22:17:23.000000000 -0400
30774+++ linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c 2011-08-23 21:48:14.000000000 -0400
30775@@ -1844,6 +1844,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fc
30776 struct fc_rpsc_speed_info_s speeds;
30777 struct bfa_port_attr_s pport_attr;
30778
30779+ pax_track_stack();
30780+
30781 bfa_trc(port->fcs, rx_fchs->s_id);
30782 bfa_trc(port->fcs, rx_fchs->d_id);
30783
30784diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa.h linux-3.0.4/drivers/scsi/bfa/bfa.h
30785--- linux-3.0.4/drivers/scsi/bfa/bfa.h 2011-07-21 22:17:23.000000000 -0400
30786+++ linux-3.0.4/drivers/scsi/bfa/bfa.h 2011-08-23 21:47:55.000000000 -0400
30787@@ -238,7 +238,7 @@ struct bfa_hwif_s {
30788 u32 *nvecs, u32 *maxvec);
30789 void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
30790 u32 *end);
30791-};
30792+} __no_const;
30793 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
30794
30795 struct bfa_iocfc_s {
30796diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h
30797--- linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h 2011-07-21 22:17:23.000000000 -0400
30798+++ linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h 2011-08-23 21:47:55.000000000 -0400
30799@@ -196,7 +196,7 @@ struct bfa_ioc_cbfn_s {
30800 bfa_ioc_disable_cbfn_t disable_cbfn;
30801 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
30802 bfa_ioc_reset_cbfn_t reset_cbfn;
30803-};
30804+} __no_const;
30805
30806 /*
30807 * Heartbeat failure notification queue element.
30808@@ -268,7 +268,7 @@ struct bfa_ioc_hwif_s {
30809 void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
30810 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
30811 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
30812-};
30813+} __no_const;
30814
30815 #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
30816 #define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
30817diff -urNp linux-3.0.4/drivers/scsi/BusLogic.c linux-3.0.4/drivers/scsi/BusLogic.c
30818--- linux-3.0.4/drivers/scsi/BusLogic.c 2011-07-21 22:17:23.000000000 -0400
30819+++ linux-3.0.4/drivers/scsi/BusLogic.c 2011-08-23 21:48:14.000000000 -0400
30820@@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFla
30821 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
30822 *PrototypeHostAdapter)
30823 {
30824+ pax_track_stack();
30825+
30826 /*
30827 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
30828 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
30829diff -urNp linux-3.0.4/drivers/scsi/dpt_i2o.c linux-3.0.4/drivers/scsi/dpt_i2o.c
30830--- linux-3.0.4/drivers/scsi/dpt_i2o.c 2011-07-21 22:17:23.000000000 -0400
30831+++ linux-3.0.4/drivers/scsi/dpt_i2o.c 2011-08-23 21:48:14.000000000 -0400
30832@@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* p
30833 dma_addr_t addr;
30834 ulong flags = 0;
30835
30836+ pax_track_stack();
30837+
30838 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
30839 // get user msg size in u32s
30840 if(get_user(size, &user_msg[0])){
30841@@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
30842 s32 rcode;
30843 dma_addr_t addr;
30844
30845+ pax_track_stack();
30846+
30847 memset(msg, 0 , sizeof(msg));
30848 len = scsi_bufflen(cmd);
30849 direction = 0x00000000;
30850diff -urNp linux-3.0.4/drivers/scsi/eata.c linux-3.0.4/drivers/scsi/eata.c
30851--- linux-3.0.4/drivers/scsi/eata.c 2011-07-21 22:17:23.000000000 -0400
30852+++ linux-3.0.4/drivers/scsi/eata.c 2011-08-23 21:48:14.000000000 -0400
30853@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
30854 struct hostdata *ha;
30855 char name[16];
30856
30857+ pax_track_stack();
30858+
30859 sprintf(name, "%s%d", driver_name, j);
30860
30861 if (!request_region(port_base, REGION_SIZE, driver_name)) {
30862diff -urNp linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c
30863--- linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c 2011-07-21 22:17:23.000000000 -0400
30864+++ linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c 2011-08-23 21:48:14.000000000 -0400
30865@@ -2503,6 +2503,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe
30866 } buf;
30867 int rc;
30868
30869+ pax_track_stack();
30870+
30871 fiph = (struct fip_header *)skb->data;
30872 sub = fiph->fip_subcode;
30873
30874diff -urNp linux-3.0.4/drivers/scsi/gdth.c linux-3.0.4/drivers/scsi/gdth.c
30875--- linux-3.0.4/drivers/scsi/gdth.c 2011-07-21 22:17:23.000000000 -0400
30876+++ linux-3.0.4/drivers/scsi/gdth.c 2011-08-23 21:48:14.000000000 -0400
30877@@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
30878 unsigned long flags;
30879 gdth_ha_str *ha;
30880
30881+ pax_track_stack();
30882+
30883 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
30884 return -EFAULT;
30885 ha = gdth_find_ha(ldrv.ionode);
30886@@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg
30887 gdth_ha_str *ha;
30888 int rval;
30889
30890+ pax_track_stack();
30891+
30892 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
30893 res.number >= MAX_HDRIVES)
30894 return -EFAULT;
30895@@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg,
30896 gdth_ha_str *ha;
30897 int rval;
30898
30899+ pax_track_stack();
30900+
30901 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
30902 return -EFAULT;
30903 ha = gdth_find_ha(gen.ionode);
30904@@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
30905 int i;
30906 gdth_cmd_str gdtcmd;
30907 char cmnd[MAX_COMMAND_SIZE];
30908+
30909+ pax_track_stack();
30910+
30911 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
30912
30913 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
30914diff -urNp linux-3.0.4/drivers/scsi/gdth_proc.c linux-3.0.4/drivers/scsi/gdth_proc.c
30915--- linux-3.0.4/drivers/scsi/gdth_proc.c 2011-07-21 22:17:23.000000000 -0400
30916+++ linux-3.0.4/drivers/scsi/gdth_proc.c 2011-08-23 21:48:14.000000000 -0400
30917@@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi
30918 u64 paddr;
30919
30920 char cmnd[MAX_COMMAND_SIZE];
30921+
30922+ pax_track_stack();
30923+
30924 memset(cmnd, 0xff, 12);
30925 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
30926
30927@@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,ch
30928 gdth_hget_str *phg;
30929 char cmnd[MAX_COMMAND_SIZE];
30930
30931+ pax_track_stack();
30932+
30933 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
30934 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
30935 if (!gdtcmd || !estr)
30936diff -urNp linux-3.0.4/drivers/scsi/hosts.c linux-3.0.4/drivers/scsi/hosts.c
30937--- linux-3.0.4/drivers/scsi/hosts.c 2011-07-21 22:17:23.000000000 -0400
30938+++ linux-3.0.4/drivers/scsi/hosts.c 2011-08-23 21:47:55.000000000 -0400
30939@@ -42,7 +42,7 @@
30940 #include "scsi_logging.h"
30941
30942
30943-static atomic_t scsi_host_next_hn; /* host_no for next new host */
30944+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
30945
30946
30947 static void scsi_host_cls_release(struct device *dev)
30948@@ -354,7 +354,7 @@ struct Scsi_Host *scsi_host_alloc(struct
30949 * subtract one because we increment first then return, but we need to
30950 * know what the next host number was before increment
30951 */
30952- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
30953+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
30954 shost->dma_channel = 0xff;
30955
30956 /* These three are default values which can be overridden */
30957diff -urNp linux-3.0.4/drivers/scsi/hpsa.c linux-3.0.4/drivers/scsi/hpsa.c
30958--- linux-3.0.4/drivers/scsi/hpsa.c 2011-07-21 22:17:23.000000000 -0400
30959+++ linux-3.0.4/drivers/scsi/hpsa.c 2011-08-23 21:47:55.000000000 -0400
30960@@ -498,7 +498,7 @@ static inline u32 next_command(struct ct
30961 u32 a;
30962
30963 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
30964- return h->access.command_completed(h);
30965+ return h->access->command_completed(h);
30966
30967 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
30968 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
30969@@ -2938,7 +2938,7 @@ static void start_io(struct ctlr_info *h
30970 while (!list_empty(&h->reqQ)) {
30971 c = list_entry(h->reqQ.next, struct CommandList, list);
30972 /* can't do anything if fifo is full */
30973- if ((h->access.fifo_full(h))) {
30974+ if ((h->access->fifo_full(h))) {
30975 dev_warn(&h->pdev->dev, "fifo full\n");
30976 break;
30977 }
30978@@ -2948,7 +2948,7 @@ static void start_io(struct ctlr_info *h
30979 h->Qdepth--;
30980
30981 /* Tell the controller execute command */
30982- h->access.submit_command(h, c);
30983+ h->access->submit_command(h, c);
30984
30985 /* Put job onto the completed Q */
30986 addQ(&h->cmpQ, c);
30987@@ -2957,17 +2957,17 @@ static void start_io(struct ctlr_info *h
30988
30989 static inline unsigned long get_next_completion(struct ctlr_info *h)
30990 {
30991- return h->access.command_completed(h);
30992+ return h->access->command_completed(h);
30993 }
30994
30995 static inline bool interrupt_pending(struct ctlr_info *h)
30996 {
30997- return h->access.intr_pending(h);
30998+ return h->access->intr_pending(h);
30999 }
31000
31001 static inline long interrupt_not_for_us(struct ctlr_info *h)
31002 {
31003- return (h->access.intr_pending(h) == 0) ||
31004+ return (h->access->intr_pending(h) == 0) ||
31005 (h->interrupts_enabled == 0);
31006 }
31007
31008@@ -3857,7 +3857,7 @@ static int __devinit hpsa_pci_init(struc
31009 if (prod_index < 0)
31010 return -ENODEV;
31011 h->product_name = products[prod_index].product_name;
31012- h->access = *(products[prod_index].access);
31013+ h->access = products[prod_index].access;
31014
31015 if (hpsa_board_disabled(h->pdev)) {
31016 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
31017@@ -4134,7 +4134,7 @@ reinit_after_soft_reset:
31018 }
31019
31020 /* make sure the board interrupts are off */
31021- h->access.set_intr_mask(h, HPSA_INTR_OFF);
31022+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
31023
31024 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
31025 goto clean2;
31026@@ -4168,7 +4168,7 @@ reinit_after_soft_reset:
31027 * fake ones to scoop up any residual completions.
31028 */
31029 spin_lock_irqsave(&h->lock, flags);
31030- h->access.set_intr_mask(h, HPSA_INTR_OFF);
31031+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
31032 spin_unlock_irqrestore(&h->lock, flags);
31033 free_irq(h->intr[h->intr_mode], h);
31034 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
31035@@ -4187,9 +4187,9 @@ reinit_after_soft_reset:
31036 dev_info(&h->pdev->dev, "Board READY.\n");
31037 dev_info(&h->pdev->dev,
31038 "Waiting for stale completions to drain.\n");
31039- h->access.set_intr_mask(h, HPSA_INTR_ON);
31040+ h->access->set_intr_mask(h, HPSA_INTR_ON);
31041 msleep(10000);
31042- h->access.set_intr_mask(h, HPSA_INTR_OFF);
31043+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
31044
31045 rc = controller_reset_failed(h->cfgtable);
31046 if (rc)
31047@@ -4210,7 +4210,7 @@ reinit_after_soft_reset:
31048 }
31049
31050 /* Turn the interrupts on so we can service requests */
31051- h->access.set_intr_mask(h, HPSA_INTR_ON);
31052+ h->access->set_intr_mask(h, HPSA_INTR_ON);
31053
31054 hpsa_hba_inquiry(h);
31055 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
31056@@ -4263,7 +4263,7 @@ static void hpsa_shutdown(struct pci_dev
31057 * To write all data in the battery backed cache to disks
31058 */
31059 hpsa_flush_cache(h);
31060- h->access.set_intr_mask(h, HPSA_INTR_OFF);
31061+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
31062 free_irq(h->intr[h->intr_mode], h);
31063 #ifdef CONFIG_PCI_MSI
31064 if (h->msix_vector)
31065@@ -4426,7 +4426,7 @@ static __devinit void hpsa_enter_perform
31066 return;
31067 }
31068 /* Change the access methods to the performant access methods */
31069- h->access = SA5_performant_access;
31070+ h->access = &SA5_performant_access;
31071 h->transMethod = CFGTBL_Trans_Performant;
31072 }
31073
31074diff -urNp linux-3.0.4/drivers/scsi/hpsa.h linux-3.0.4/drivers/scsi/hpsa.h
31075--- linux-3.0.4/drivers/scsi/hpsa.h 2011-08-23 21:44:40.000000000 -0400
31076+++ linux-3.0.4/drivers/scsi/hpsa.h 2011-08-23 21:47:55.000000000 -0400
31077@@ -73,7 +73,7 @@ struct ctlr_info {
31078 unsigned int msix_vector;
31079 unsigned int msi_vector;
31080 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
31081- struct access_method access;
31082+ struct access_method *access;
31083
31084 /* queue and queue Info */
31085 struct list_head reqQ;
31086diff -urNp linux-3.0.4/drivers/scsi/ips.h linux-3.0.4/drivers/scsi/ips.h
31087--- linux-3.0.4/drivers/scsi/ips.h 2011-07-21 22:17:23.000000000 -0400
31088+++ linux-3.0.4/drivers/scsi/ips.h 2011-08-23 21:47:55.000000000 -0400
31089@@ -1027,7 +1027,7 @@ typedef struct {
31090 int (*intr)(struct ips_ha *);
31091 void (*enableint)(struct ips_ha *);
31092 uint32_t (*statupd)(struct ips_ha *);
31093-} ips_hw_func_t;
31094+} __no_const ips_hw_func_t;
31095
31096 typedef struct ips_ha {
31097 uint8_t ha_id[IPS_MAX_CHANNELS+1];
31098diff -urNp linux-3.0.4/drivers/scsi/libfc/fc_exch.c linux-3.0.4/drivers/scsi/libfc/fc_exch.c
31099--- linux-3.0.4/drivers/scsi/libfc/fc_exch.c 2011-07-21 22:17:23.000000000 -0400
31100+++ linux-3.0.4/drivers/scsi/libfc/fc_exch.c 2011-08-23 21:47:55.000000000 -0400
31101@@ -105,12 +105,12 @@ struct fc_exch_mgr {
31102 * all together if not used XXX
31103 */
31104 struct {
31105- atomic_t no_free_exch;
31106- atomic_t no_free_exch_xid;
31107- atomic_t xid_not_found;
31108- atomic_t xid_busy;
31109- atomic_t seq_not_found;
31110- atomic_t non_bls_resp;
31111+ atomic_unchecked_t no_free_exch;
31112+ atomic_unchecked_t no_free_exch_xid;
31113+ atomic_unchecked_t xid_not_found;
31114+ atomic_unchecked_t xid_busy;
31115+ atomic_unchecked_t seq_not_found;
31116+ atomic_unchecked_t non_bls_resp;
31117 } stats;
31118 };
31119
31120@@ -700,7 +700,7 @@ static struct fc_exch *fc_exch_em_alloc(
31121 /* allocate memory for exchange */
31122 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
31123 if (!ep) {
31124- atomic_inc(&mp->stats.no_free_exch);
31125+ atomic_inc_unchecked(&mp->stats.no_free_exch);
31126 goto out;
31127 }
31128 memset(ep, 0, sizeof(*ep));
31129@@ -761,7 +761,7 @@ out:
31130 return ep;
31131 err:
31132 spin_unlock_bh(&pool->lock);
31133- atomic_inc(&mp->stats.no_free_exch_xid);
31134+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
31135 mempool_free(ep, mp->ep_pool);
31136 return NULL;
31137 }
31138@@ -906,7 +906,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31139 xid = ntohs(fh->fh_ox_id); /* we originated exch */
31140 ep = fc_exch_find(mp, xid);
31141 if (!ep) {
31142- atomic_inc(&mp->stats.xid_not_found);
31143+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31144 reject = FC_RJT_OX_ID;
31145 goto out;
31146 }
31147@@ -936,7 +936,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31148 ep = fc_exch_find(mp, xid);
31149 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
31150 if (ep) {
31151- atomic_inc(&mp->stats.xid_busy);
31152+ atomic_inc_unchecked(&mp->stats.xid_busy);
31153 reject = FC_RJT_RX_ID;
31154 goto rel;
31155 }
31156@@ -947,7 +947,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31157 }
31158 xid = ep->xid; /* get our XID */
31159 } else if (!ep) {
31160- atomic_inc(&mp->stats.xid_not_found);
31161+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31162 reject = FC_RJT_RX_ID; /* XID not found */
31163 goto out;
31164 }
31165@@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31166 } else {
31167 sp = &ep->seq;
31168 if (sp->id != fh->fh_seq_id) {
31169- atomic_inc(&mp->stats.seq_not_found);
31170+ atomic_inc_unchecked(&mp->stats.seq_not_found);
31171 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
31172 goto rel;
31173 }
31174@@ -1392,22 +1392,22 @@ static void fc_exch_recv_seq_resp(struct
31175
31176 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
31177 if (!ep) {
31178- atomic_inc(&mp->stats.xid_not_found);
31179+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31180 goto out;
31181 }
31182 if (ep->esb_stat & ESB_ST_COMPLETE) {
31183- atomic_inc(&mp->stats.xid_not_found);
31184+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31185 goto rel;
31186 }
31187 if (ep->rxid == FC_XID_UNKNOWN)
31188 ep->rxid = ntohs(fh->fh_rx_id);
31189 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
31190- atomic_inc(&mp->stats.xid_not_found);
31191+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31192 goto rel;
31193 }
31194 if (ep->did != ntoh24(fh->fh_s_id) &&
31195 ep->did != FC_FID_FLOGI) {
31196- atomic_inc(&mp->stats.xid_not_found);
31197+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31198 goto rel;
31199 }
31200 sof = fr_sof(fp);
31201@@ -1416,7 +1416,7 @@ static void fc_exch_recv_seq_resp(struct
31202 sp->ssb_stat |= SSB_ST_RESP;
31203 sp->id = fh->fh_seq_id;
31204 } else if (sp->id != fh->fh_seq_id) {
31205- atomic_inc(&mp->stats.seq_not_found);
31206+ atomic_inc_unchecked(&mp->stats.seq_not_found);
31207 goto rel;
31208 }
31209
31210@@ -1480,9 +1480,9 @@ static void fc_exch_recv_resp(struct fc_
31211 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
31212
31213 if (!sp)
31214- atomic_inc(&mp->stats.xid_not_found);
31215+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31216 else
31217- atomic_inc(&mp->stats.non_bls_resp);
31218+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
31219
31220 fc_frame_free(fp);
31221 }
31222diff -urNp linux-3.0.4/drivers/scsi/libsas/sas_ata.c linux-3.0.4/drivers/scsi/libsas/sas_ata.c
31223--- linux-3.0.4/drivers/scsi/libsas/sas_ata.c 2011-07-21 22:17:23.000000000 -0400
31224+++ linux-3.0.4/drivers/scsi/libsas/sas_ata.c 2011-08-23 21:47:55.000000000 -0400
31225@@ -368,7 +368,7 @@ static struct ata_port_operations sas_sa
31226 .postreset = ata_std_postreset,
31227 .error_handler = ata_std_error_handler,
31228 .post_internal_cmd = sas_ata_post_internal,
31229- .qc_defer = ata_std_qc_defer,
31230+ .qc_defer = ata_std_qc_defer,
31231 .qc_prep = ata_noop_qc_prep,
31232 .qc_issue = sas_ata_qc_issue,
31233 .qc_fill_rtf = sas_ata_qc_fill_rtf,
31234diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c
31235--- linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c 2011-07-21 22:17:23.000000000 -0400
31236+++ linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c 2011-08-23 21:48:14.000000000 -0400
31237@@ -104,7 +104,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_
31238
31239 #include <linux/debugfs.h>
31240
31241-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31242+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31243 static unsigned long lpfc_debugfs_start_time = 0L;
31244
31245 /* iDiag */
31246@@ -141,7 +141,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
31247 lpfc_debugfs_enable = 0;
31248
31249 len = 0;
31250- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
31251+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
31252 (lpfc_debugfs_max_disc_trc - 1);
31253 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
31254 dtp = vport->disc_trc + i;
31255@@ -202,7 +202,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
31256 lpfc_debugfs_enable = 0;
31257
31258 len = 0;
31259- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
31260+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
31261 (lpfc_debugfs_max_slow_ring_trc - 1);
31262 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
31263 dtp = phba->slow_ring_trc + i;
31264@@ -380,6 +380,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
31265 uint32_t *ptr;
31266 char buffer[1024];
31267
31268+ pax_track_stack();
31269+
31270 off = 0;
31271 spin_lock_irq(&phba->hbalock);
31272
31273@@ -617,14 +619,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
31274 !vport || !vport->disc_trc)
31275 return;
31276
31277- index = atomic_inc_return(&vport->disc_trc_cnt) &
31278+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
31279 (lpfc_debugfs_max_disc_trc - 1);
31280 dtp = vport->disc_trc + index;
31281 dtp->fmt = fmt;
31282 dtp->data1 = data1;
31283 dtp->data2 = data2;
31284 dtp->data3 = data3;
31285- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31286+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31287 dtp->jif = jiffies;
31288 #endif
31289 return;
31290@@ -655,14 +657,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
31291 !phba || !phba->slow_ring_trc)
31292 return;
31293
31294- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
31295+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
31296 (lpfc_debugfs_max_slow_ring_trc - 1);
31297 dtp = phba->slow_ring_trc + index;
31298 dtp->fmt = fmt;
31299 dtp->data1 = data1;
31300 dtp->data2 = data2;
31301 dtp->data3 = data3;
31302- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31303+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31304 dtp->jif = jiffies;
31305 #endif
31306 return;
31307@@ -2606,7 +2608,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31308 "slow_ring buffer\n");
31309 goto debug_failed;
31310 }
31311- atomic_set(&phba->slow_ring_trc_cnt, 0);
31312+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
31313 memset(phba->slow_ring_trc, 0,
31314 (sizeof(struct lpfc_debugfs_trc) *
31315 lpfc_debugfs_max_slow_ring_trc));
31316@@ -2652,7 +2654,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31317 "buffer\n");
31318 goto debug_failed;
31319 }
31320- atomic_set(&vport->disc_trc_cnt, 0);
31321+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
31322
31323 snprintf(name, sizeof(name), "discovery_trace");
31324 vport->debug_disc_trc =
31325diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc.h linux-3.0.4/drivers/scsi/lpfc/lpfc.h
31326--- linux-3.0.4/drivers/scsi/lpfc/lpfc.h 2011-07-21 22:17:23.000000000 -0400
31327+++ linux-3.0.4/drivers/scsi/lpfc/lpfc.h 2011-08-23 21:47:55.000000000 -0400
31328@@ -420,7 +420,7 @@ struct lpfc_vport {
31329 struct dentry *debug_nodelist;
31330 struct dentry *vport_debugfs_root;
31331 struct lpfc_debugfs_trc *disc_trc;
31332- atomic_t disc_trc_cnt;
31333+ atomic_unchecked_t disc_trc_cnt;
31334 #endif
31335 uint8_t stat_data_enabled;
31336 uint8_t stat_data_blocked;
31337@@ -826,8 +826,8 @@ struct lpfc_hba {
31338 struct timer_list fabric_block_timer;
31339 unsigned long bit_flags;
31340 #define FABRIC_COMANDS_BLOCKED 0
31341- atomic_t num_rsrc_err;
31342- atomic_t num_cmd_success;
31343+ atomic_unchecked_t num_rsrc_err;
31344+ atomic_unchecked_t num_cmd_success;
31345 unsigned long last_rsrc_error_time;
31346 unsigned long last_ramp_down_time;
31347 unsigned long last_ramp_up_time;
31348@@ -841,7 +841,7 @@ struct lpfc_hba {
31349 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
31350 struct dentry *debug_slow_ring_trc;
31351 struct lpfc_debugfs_trc *slow_ring_trc;
31352- atomic_t slow_ring_trc_cnt;
31353+ atomic_unchecked_t slow_ring_trc_cnt;
31354 /* iDiag debugfs sub-directory */
31355 struct dentry *idiag_root;
31356 struct dentry *idiag_pci_cfg;
31357diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c
31358--- linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c 2011-07-21 22:17:23.000000000 -0400
31359+++ linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c 2011-08-23 21:47:56.000000000 -0400
31360@@ -9923,8 +9923,10 @@ lpfc_init(void)
31361 printk(LPFC_COPYRIGHT "\n");
31362
31363 if (lpfc_enable_npiv) {
31364- lpfc_transport_functions.vport_create = lpfc_vport_create;
31365- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31366+ pax_open_kernel();
31367+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
31368+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31369+ pax_close_kernel();
31370 }
31371 lpfc_transport_template =
31372 fc_attach_transport(&lpfc_transport_functions);
31373diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c
31374--- linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c 2011-07-21 22:17:23.000000000 -0400
31375+++ linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c 2011-08-23 21:47:56.000000000 -0400
31376@@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
31377 uint32_t evt_posted;
31378
31379 spin_lock_irqsave(&phba->hbalock, flags);
31380- atomic_inc(&phba->num_rsrc_err);
31381+ atomic_inc_unchecked(&phba->num_rsrc_err);
31382 phba->last_rsrc_error_time = jiffies;
31383
31384 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
31385@@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
31386 unsigned long flags;
31387 struct lpfc_hba *phba = vport->phba;
31388 uint32_t evt_posted;
31389- atomic_inc(&phba->num_cmd_success);
31390+ atomic_inc_unchecked(&phba->num_cmd_success);
31391
31392 if (vport->cfg_lun_queue_depth <= queue_depth)
31393 return;
31394@@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31395 unsigned long num_rsrc_err, num_cmd_success;
31396 int i;
31397
31398- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
31399- num_cmd_success = atomic_read(&phba->num_cmd_success);
31400+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
31401+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
31402
31403 vports = lpfc_create_vport_work_array(phba);
31404 if (vports != NULL)
31405@@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31406 }
31407 }
31408 lpfc_destroy_vport_work_array(phba, vports);
31409- atomic_set(&phba->num_rsrc_err, 0);
31410- atomic_set(&phba->num_cmd_success, 0);
31411+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
31412+ atomic_set_unchecked(&phba->num_cmd_success, 0);
31413 }
31414
31415 /**
31416@@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
31417 }
31418 }
31419 lpfc_destroy_vport_work_array(phba, vports);
31420- atomic_set(&phba->num_rsrc_err, 0);
31421- atomic_set(&phba->num_cmd_success, 0);
31422+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
31423+ atomic_set_unchecked(&phba->num_cmd_success, 0);
31424 }
31425
31426 /**
31427diff -urNp linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c
31428--- linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c 2011-07-21 22:17:23.000000000 -0400
31429+++ linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c 2011-08-23 21:48:14.000000000 -0400
31430@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
31431 int rval;
31432 int i;
31433
31434+ pax_track_stack();
31435+
31436 // Allocate memory for the base list of scb for management module.
31437 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
31438
31439diff -urNp linux-3.0.4/drivers/scsi/osd/osd_initiator.c linux-3.0.4/drivers/scsi/osd/osd_initiator.c
31440--- linux-3.0.4/drivers/scsi/osd/osd_initiator.c 2011-07-21 22:17:23.000000000 -0400
31441+++ linux-3.0.4/drivers/scsi/osd/osd_initiator.c 2011-08-23 21:48:14.000000000 -0400
31442@@ -97,6 +97,8 @@ static int _osd_get_print_system_info(st
31443 int nelem = ARRAY_SIZE(get_attrs), a = 0;
31444 int ret;
31445
31446+ pax_track_stack();
31447+
31448 or = osd_start_request(od, GFP_KERNEL);
31449 if (!or)
31450 return -ENOMEM;
31451diff -urNp linux-3.0.4/drivers/scsi/pmcraid.c linux-3.0.4/drivers/scsi/pmcraid.c
31452--- linux-3.0.4/drivers/scsi/pmcraid.c 2011-08-23 21:44:40.000000000 -0400
31453+++ linux-3.0.4/drivers/scsi/pmcraid.c 2011-08-23 21:47:56.000000000 -0400
31454@@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct sc
31455 res->scsi_dev = scsi_dev;
31456 scsi_dev->hostdata = res;
31457 res->change_detected = 0;
31458- atomic_set(&res->read_failures, 0);
31459- atomic_set(&res->write_failures, 0);
31460+ atomic_set_unchecked(&res->read_failures, 0);
31461+ atomic_set_unchecked(&res->write_failures, 0);
31462 rc = 0;
31463 }
31464 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
31465@@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct
31466
31467 /* If this was a SCSI read/write command keep count of errors */
31468 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
31469- atomic_inc(&res->read_failures);
31470+ atomic_inc_unchecked(&res->read_failures);
31471 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
31472- atomic_inc(&res->write_failures);
31473+ atomic_inc_unchecked(&res->write_failures);
31474
31475 if (!RES_IS_GSCSI(res->cfg_entry) &&
31476 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
31477@@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
31478 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
31479 * hrrq_id assigned here in queuecommand
31480 */
31481- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
31482+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
31483 pinstance->num_hrrq;
31484 cmd->cmd_done = pmcraid_io_done;
31485
31486@@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
31487 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
31488 * hrrq_id assigned here in queuecommand
31489 */
31490- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
31491+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
31492 pinstance->num_hrrq;
31493
31494 if (request_size) {
31495@@ -4498,7 +4498,7 @@ static void pmcraid_worker_function(stru
31496
31497 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
31498 /* add resources only after host is added into system */
31499- if (!atomic_read(&pinstance->expose_resources))
31500+ if (!atomic_read_unchecked(&pinstance->expose_resources))
31501 return;
31502
31503 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
31504@@ -5332,8 +5332,8 @@ static int __devinit pmcraid_init_instan
31505 init_waitqueue_head(&pinstance->reset_wait_q);
31506
31507 atomic_set(&pinstance->outstanding_cmds, 0);
31508- atomic_set(&pinstance->last_message_id, 0);
31509- atomic_set(&pinstance->expose_resources, 0);
31510+ atomic_set_unchecked(&pinstance->last_message_id, 0);
31511+ atomic_set_unchecked(&pinstance->expose_resources, 0);
31512
31513 INIT_LIST_HEAD(&pinstance->free_res_q);
31514 INIT_LIST_HEAD(&pinstance->used_res_q);
31515@@ -6048,7 +6048,7 @@ static int __devinit pmcraid_probe(
31516 /* Schedule worker thread to handle CCN and take care of adding and
31517 * removing devices to OS
31518 */
31519- atomic_set(&pinstance->expose_resources, 1);
31520+ atomic_set_unchecked(&pinstance->expose_resources, 1);
31521 schedule_work(&pinstance->worker_q);
31522 return rc;
31523
31524diff -urNp linux-3.0.4/drivers/scsi/pmcraid.h linux-3.0.4/drivers/scsi/pmcraid.h
31525--- linux-3.0.4/drivers/scsi/pmcraid.h 2011-07-21 22:17:23.000000000 -0400
31526+++ linux-3.0.4/drivers/scsi/pmcraid.h 2011-08-23 21:47:56.000000000 -0400
31527@@ -749,7 +749,7 @@ struct pmcraid_instance {
31528 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
31529
31530 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
31531- atomic_t last_message_id;
31532+ atomic_unchecked_t last_message_id;
31533
31534 /* configuration table */
31535 struct pmcraid_config_table *cfg_table;
31536@@ -778,7 +778,7 @@ struct pmcraid_instance {
31537 atomic_t outstanding_cmds;
31538
31539 /* should add/delete resources to mid-layer now ?*/
31540- atomic_t expose_resources;
31541+ atomic_unchecked_t expose_resources;
31542
31543
31544
31545@@ -814,8 +814,8 @@ struct pmcraid_resource_entry {
31546 struct pmcraid_config_table_entry_ext cfg_entry_ext;
31547 };
31548 struct scsi_device *scsi_dev; /* Link scsi_device structure */
31549- atomic_t read_failures; /* count of failed READ commands */
31550- atomic_t write_failures; /* count of failed WRITE commands */
31551+ atomic_unchecked_t read_failures; /* count of failed READ commands */
31552+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
31553
31554 /* To indicate add/delete/modify during CCN */
31555 u8 change_detected;
31556diff -urNp linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h
31557--- linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h 2011-07-21 22:17:23.000000000 -0400
31558+++ linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h 2011-08-23 21:47:56.000000000 -0400
31559@@ -2244,7 +2244,7 @@ struct isp_operations {
31560 int (*get_flash_version) (struct scsi_qla_host *, void *);
31561 int (*start_scsi) (srb_t *);
31562 int (*abort_isp) (struct scsi_qla_host *);
31563-};
31564+} __no_const;
31565
31566 /* MSI-X Support *************************************************************/
31567
31568diff -urNp linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h
31569--- linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h 2011-07-21 22:17:23.000000000 -0400
31570+++ linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h 2011-08-23 21:47:56.000000000 -0400
31571@@ -256,7 +256,7 @@ struct ddb_entry {
31572 atomic_t retry_relogin_timer; /* Min Time between relogins
31573 * (4000 only) */
31574 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
31575- atomic_t relogin_retry_count; /* Num of times relogin has been
31576+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
31577 * retried */
31578
31579 uint16_t port;
31580diff -urNp linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c
31581--- linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c 2011-07-21 22:17:23.000000000 -0400
31582+++ linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c 2011-08-23 21:47:56.000000000 -0400
31583@@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_
31584 ddb_entry->fw_ddb_index = fw_ddb_index;
31585 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
31586 atomic_set(&ddb_entry->relogin_timer, 0);
31587- atomic_set(&ddb_entry->relogin_retry_count, 0);
31588+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
31589 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
31590 list_add_tail(&ddb_entry->list, &ha->ddb_list);
31591 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
31592@@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct s
31593 if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
31594 (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
31595 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
31596- atomic_set(&ddb_entry->relogin_retry_count, 0);
31597+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
31598 atomic_set(&ddb_entry->relogin_timer, 0);
31599 clear_bit(DF_RELOGIN, &ddb_entry->flags);
31600 iscsi_unblock_session(ddb_entry->sess);
31601diff -urNp linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c
31602--- linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c 2011-07-21 22:17:23.000000000 -0400
31603+++ linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c 2011-08-23 21:47:56.000000000 -0400
31604@@ -811,13 +811,13 @@ static void qla4xxx_timer(struct scsi_ql
31605 ddb_entry->fw_ddb_device_state ==
31606 DDB_DS_SESSION_FAILED) {
31607 /* Reset retry relogin timer */
31608- atomic_inc(&ddb_entry->relogin_retry_count);
31609+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
31610 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
31611 " timed out-retrying"
31612 " relogin (%d)\n",
31613 ha->host_no,
31614 ddb_entry->fw_ddb_index,
31615- atomic_read(&ddb_entry->
31616+ atomic_read_unchecked(&ddb_entry->
31617 relogin_retry_count))
31618 );
31619 start_dpc++;
31620diff -urNp linux-3.0.4/drivers/scsi/scsi.c linux-3.0.4/drivers/scsi/scsi.c
31621--- linux-3.0.4/drivers/scsi/scsi.c 2011-07-21 22:17:23.000000000 -0400
31622+++ linux-3.0.4/drivers/scsi/scsi.c 2011-08-23 21:47:56.000000000 -0400
31623@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
31624 unsigned long timeout;
31625 int rtn = 0;
31626
31627- atomic_inc(&cmd->device->iorequest_cnt);
31628+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
31629
31630 /* check if the device is still usable */
31631 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
31632diff -urNp linux-3.0.4/drivers/scsi/scsi_debug.c linux-3.0.4/drivers/scsi/scsi_debug.c
31633--- linux-3.0.4/drivers/scsi/scsi_debug.c 2011-07-21 22:17:23.000000000 -0400
31634+++ linux-3.0.4/drivers/scsi/scsi_debug.c 2011-08-23 21:48:14.000000000 -0400
31635@@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_
31636 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
31637 unsigned char *cmd = (unsigned char *)scp->cmnd;
31638
31639+ pax_track_stack();
31640+
31641 if ((errsts = check_readiness(scp, 1, devip)))
31642 return errsts;
31643 memset(arr, 0, sizeof(arr));
31644@@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cm
31645 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
31646 unsigned char *cmd = (unsigned char *)scp->cmnd;
31647
31648+ pax_track_stack();
31649+
31650 if ((errsts = check_readiness(scp, 1, devip)))
31651 return errsts;
31652 memset(arr, 0, sizeof(arr));
31653diff -urNp linux-3.0.4/drivers/scsi/scsi_lib.c linux-3.0.4/drivers/scsi/scsi_lib.c
31654--- linux-3.0.4/drivers/scsi/scsi_lib.c 2011-08-23 21:44:40.000000000 -0400
31655+++ linux-3.0.4/drivers/scsi/scsi_lib.c 2011-08-23 21:47:56.000000000 -0400
31656@@ -1412,7 +1412,7 @@ static void scsi_kill_request(struct req
31657 shost = sdev->host;
31658 scsi_init_cmd_errh(cmd);
31659 cmd->result = DID_NO_CONNECT << 16;
31660- atomic_inc(&cmd->device->iorequest_cnt);
31661+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
31662
31663 /*
31664 * SCSI request completion path will do scsi_device_unbusy(),
31665@@ -1438,9 +1438,9 @@ static void scsi_softirq_done(struct req
31666
31667 INIT_LIST_HEAD(&cmd->eh_entry);
31668
31669- atomic_inc(&cmd->device->iodone_cnt);
31670+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
31671 if (cmd->result)
31672- atomic_inc(&cmd->device->ioerr_cnt);
31673+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
31674
31675 disposition = scsi_decide_disposition(cmd);
31676 if (disposition != SUCCESS &&
31677diff -urNp linux-3.0.4/drivers/scsi/scsi_sysfs.c linux-3.0.4/drivers/scsi/scsi_sysfs.c
31678--- linux-3.0.4/drivers/scsi/scsi_sysfs.c 2011-07-21 22:17:23.000000000 -0400
31679+++ linux-3.0.4/drivers/scsi/scsi_sysfs.c 2011-08-23 21:47:56.000000000 -0400
31680@@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev,
31681 char *buf) \
31682 { \
31683 struct scsi_device *sdev = to_scsi_device(dev); \
31684- unsigned long long count = atomic_read(&sdev->field); \
31685+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
31686 return snprintf(buf, 20, "0x%llx\n", count); \
31687 } \
31688 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
31689diff -urNp linux-3.0.4/drivers/scsi/scsi_transport_fc.c linux-3.0.4/drivers/scsi/scsi_transport_fc.c
31690--- linux-3.0.4/drivers/scsi/scsi_transport_fc.c 2011-07-21 22:17:23.000000000 -0400
31691+++ linux-3.0.4/drivers/scsi/scsi_transport_fc.c 2011-08-23 21:47:56.000000000 -0400
31692@@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_
31693 * Netlink Infrastructure
31694 */
31695
31696-static atomic_t fc_event_seq;
31697+static atomic_unchecked_t fc_event_seq;
31698
31699 /**
31700 * fc_get_event_number - Obtain the next sequential FC event number
31701@@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
31702 u32
31703 fc_get_event_number(void)
31704 {
31705- return atomic_add_return(1, &fc_event_seq);
31706+ return atomic_add_return_unchecked(1, &fc_event_seq);
31707 }
31708 EXPORT_SYMBOL(fc_get_event_number);
31709
31710@@ -645,7 +645,7 @@ static __init int fc_transport_init(void
31711 {
31712 int error;
31713
31714- atomic_set(&fc_event_seq, 0);
31715+ atomic_set_unchecked(&fc_event_seq, 0);
31716
31717 error = transport_class_register(&fc_host_class);
31718 if (error)
31719@@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char
31720 char *cp;
31721
31722 *val = simple_strtoul(buf, &cp, 0);
31723- if ((*cp && (*cp != '\n')) || (*val < 0))
31724+ if (*cp && (*cp != '\n'))
31725 return -EINVAL;
31726 /*
31727 * Check for overflow; dev_loss_tmo is u32
31728diff -urNp linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c
31729--- linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c 2011-07-21 22:17:23.000000000 -0400
31730+++ linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c 2011-08-23 21:47:56.000000000 -0400
31731@@ -83,7 +83,7 @@ struct iscsi_internal {
31732 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
31733 };
31734
31735-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
31736+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
31737 static struct workqueue_struct *iscsi_eh_timer_workq;
31738
31739 /*
31740@@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_s
31741 int err;
31742
31743 ihost = shost->shost_data;
31744- session->sid = atomic_add_return(1, &iscsi_session_nr);
31745+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
31746
31747 if (id == ISCSI_MAX_TARGET) {
31748 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
31749@@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(v
31750 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
31751 ISCSI_TRANSPORT_VERSION);
31752
31753- atomic_set(&iscsi_session_nr, 0);
31754+ atomic_set_unchecked(&iscsi_session_nr, 0);
31755
31756 err = class_register(&iscsi_transport_class);
31757 if (err)
31758diff -urNp linux-3.0.4/drivers/scsi/scsi_transport_srp.c linux-3.0.4/drivers/scsi/scsi_transport_srp.c
31759--- linux-3.0.4/drivers/scsi/scsi_transport_srp.c 2011-07-21 22:17:23.000000000 -0400
31760+++ linux-3.0.4/drivers/scsi/scsi_transport_srp.c 2011-08-23 21:47:56.000000000 -0400
31761@@ -33,7 +33,7 @@
31762 #include "scsi_transport_srp_internal.h"
31763
31764 struct srp_host_attrs {
31765- atomic_t next_port_id;
31766+ atomic_unchecked_t next_port_id;
31767 };
31768 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
31769
31770@@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
31771 struct Scsi_Host *shost = dev_to_shost(dev);
31772 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
31773
31774- atomic_set(&srp_host->next_port_id, 0);
31775+ atomic_set_unchecked(&srp_host->next_port_id, 0);
31776 return 0;
31777 }
31778
31779@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
31780 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
31781 rport->roles = ids->roles;
31782
31783- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
31784+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
31785 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
31786
31787 transport_setup_device(&rport->dev);
31788diff -urNp linux-3.0.4/drivers/scsi/sg.c linux-3.0.4/drivers/scsi/sg.c
31789--- linux-3.0.4/drivers/scsi/sg.c 2011-07-21 22:17:23.000000000 -0400
31790+++ linux-3.0.4/drivers/scsi/sg.c 2011-08-23 21:47:56.000000000 -0400
31791@@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
31792 const struct file_operations * fops;
31793 };
31794
31795-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
31796+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
31797 {"allow_dio", &adio_fops},
31798 {"debug", &debug_fops},
31799 {"def_reserved_size", &dressz_fops},
31800@@ -2325,7 +2325,7 @@ sg_proc_init(void)
31801 {
31802 int k, mask;
31803 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
31804- struct sg_proc_leaf * leaf;
31805+ const struct sg_proc_leaf * leaf;
31806
31807 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
31808 if (!sg_proc_sgp)
31809diff -urNp linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c
31810--- linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-07-21 22:17:23.000000000 -0400
31811+++ linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-08-23 21:48:14.000000000 -0400
31812@@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct p
31813 int do_iounmap = 0;
31814 int do_disable_device = 1;
31815
31816+ pax_track_stack();
31817+
31818 memset(&sym_dev, 0, sizeof(sym_dev));
31819 memset(&nvram, 0, sizeof(nvram));
31820 sym_dev.pdev = pdev;
31821diff -urNp linux-3.0.4/drivers/scsi/vmw_pvscsi.c linux-3.0.4/drivers/scsi/vmw_pvscsi.c
31822--- linux-3.0.4/drivers/scsi/vmw_pvscsi.c 2011-07-21 22:17:23.000000000 -0400
31823+++ linux-3.0.4/drivers/scsi/vmw_pvscsi.c 2011-08-23 21:48:14.000000000 -0400
31824@@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const
31825 dma_addr_t base;
31826 unsigned i;
31827
31828+ pax_track_stack();
31829+
31830 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
31831 cmd.reqRingNumPages = adapter->req_pages;
31832 cmd.cmpRingNumPages = adapter->cmp_pages;
31833diff -urNp linux-3.0.4/drivers/spi/spi.c linux-3.0.4/drivers/spi/spi.c
31834--- linux-3.0.4/drivers/spi/spi.c 2011-07-21 22:17:23.000000000 -0400
31835+++ linux-3.0.4/drivers/spi/spi.c 2011-08-23 21:47:56.000000000 -0400
31836@@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *ma
31837 EXPORT_SYMBOL_GPL(spi_bus_unlock);
31838
31839 /* portable code must never pass more than 32 bytes */
31840-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
31841+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
31842
31843 static u8 *buf;
31844
31845diff -urNp linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c
31846--- linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-08-23 21:44:40.000000000 -0400
31847+++ linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-08-23 21:48:14.000000000 -0400
31848@@ -362,7 +362,7 @@ static struct ar_cookie s_ar_cookie_mem[
31849 (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
31850
31851
31852-static struct net_device_ops ar6000_netdev_ops = {
31853+static net_device_ops_no_const ar6000_netdev_ops = {
31854 .ndo_init = NULL,
31855 .ndo_open = ar6000_open,
31856 .ndo_stop = ar6000_close,
31857diff -urNp linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
31858--- linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-07-21 22:17:23.000000000 -0400
31859+++ linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-08-23 21:47:56.000000000 -0400
31860@@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void
31861 typedef struct ar6k_pal_config_s
31862 {
31863 ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
31864-}ar6k_pal_config_t;
31865+} __no_const ar6k_pal_config_t;
31866
31867 void register_pal_cb(ar6k_pal_config_t *palConfig_p);
31868 #endif /* _AR6K_PAL_H_ */
31869diff -urNp linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
31870--- linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-07-21 22:17:23.000000000 -0400
31871+++ linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-08-23 21:47:56.000000000 -0400
31872@@ -853,14 +853,14 @@ static void dhd_op_if(dhd_if_t *ifp)
31873 free_netdev(ifp->net);
31874 }
31875 /* Allocate etherdev, including space for private structure */
31876- ifp->net = alloc_etherdev(sizeof(dhd));
31877+ ifp->net = alloc_etherdev(sizeof(*dhd));
31878 if (!ifp->net) {
31879 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
31880 ret = -ENOMEM;
31881 }
31882 if (ret == 0) {
31883 strcpy(ifp->net->name, ifp->name);
31884- memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
31885+ memcpy(netdev_priv(ifp->net), dhd, sizeof(*dhd));
31886 err = dhd_net_attach(&dhd->pub, ifp->idx);
31887 if (err != 0) {
31888 DHD_ERROR(("%s: dhd_net_attach failed, "
31889@@ -1872,7 +1872,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
31890 strcpy(nv_path, nvram_path);
31891
31892 /* Allocate etherdev, including space for private structure */
31893- net = alloc_etherdev(sizeof(dhd));
31894+ net = alloc_etherdev(sizeof(*dhd));
31895 if (!net) {
31896 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
31897 goto fail;
31898@@ -1888,7 +1888,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
31899 /*
31900 * Save the dhd_info into the priv
31901 */
31902- memcpy(netdev_priv(net), &dhd, sizeof(dhd));
31903+ memcpy(netdev_priv(net), dhd, sizeof(*dhd));
31904
31905 /* Set network interface name if it was provided as module parameter */
31906 if (iface_name[0]) {
31907@@ -2004,7 +2004,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
31908 /*
31909 * Save the dhd_info into the priv
31910 */
31911- memcpy(netdev_priv(net), &dhd, sizeof(dhd));
31912+ memcpy(netdev_priv(net), dhd, sizeof(*dhd));
31913
31914 #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
31915 g_bus = bus;
31916diff -urNp linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h
31917--- linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-07-21 22:17:23.000000000 -0400
31918+++ linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-08-23 21:47:56.000000000 -0400
31919@@ -593,7 +593,7 @@ struct phy_func_ptr {
31920 initfn_t carrsuppr;
31921 rxsigpwrfn_t rxsigpwr;
31922 detachfn_t detach;
31923-};
31924+} __no_const;
31925 typedef struct phy_func_ptr phy_func_ptr_t;
31926
31927 struct phy_info {
31928diff -urNp linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h
31929--- linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h 2011-07-21 22:17:23.000000000 -0400
31930+++ linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h 2011-08-23 21:47:56.000000000 -0400
31931@@ -185,7 +185,7 @@ typedef struct {
31932 u16 func, uint bustype, void *regsva, void *param);
31933 /* detach from device */
31934 void (*detach) (void *ch);
31935-} bcmsdh_driver_t;
31936+} __no_const bcmsdh_driver_t;
31937
31938 /* platform specific/high level functions */
31939 extern int bcmsdh_register(bcmsdh_driver_t *driver);
31940diff -urNp linux-3.0.4/drivers/staging/et131x/et1310_tx.c linux-3.0.4/drivers/staging/et131x/et1310_tx.c
31941--- linux-3.0.4/drivers/staging/et131x/et1310_tx.c 2011-07-21 22:17:23.000000000 -0400
31942+++ linux-3.0.4/drivers/staging/et131x/et1310_tx.c 2011-08-23 21:47:56.000000000 -0400
31943@@ -635,11 +635,11 @@ inline void et131x_free_send_packet(stru
31944 struct net_device_stats *stats = &etdev->net_stats;
31945
31946 if (tcb->flags & fMP_DEST_BROAD)
31947- atomic_inc(&etdev->Stats.brdcstxmt);
31948+ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
31949 else if (tcb->flags & fMP_DEST_MULTI)
31950- atomic_inc(&etdev->Stats.multixmt);
31951+ atomic_inc_unchecked(&etdev->Stats.multixmt);
31952 else
31953- atomic_inc(&etdev->Stats.unixmt);
31954+ atomic_inc_unchecked(&etdev->Stats.unixmt);
31955
31956 if (tcb->skb) {
31957 stats->tx_bytes += tcb->skb->len;
31958diff -urNp linux-3.0.4/drivers/staging/et131x/et131x_adapter.h linux-3.0.4/drivers/staging/et131x/et131x_adapter.h
31959--- linux-3.0.4/drivers/staging/et131x/et131x_adapter.h 2011-07-21 22:17:23.000000000 -0400
31960+++ linux-3.0.4/drivers/staging/et131x/et131x_adapter.h 2011-08-23 21:47:56.000000000 -0400
31961@@ -110,11 +110,11 @@ typedef struct _ce_stats_t {
31962 * operations
31963 */
31964 u32 unircv; /* # multicast packets received */
31965- atomic_t unixmt; /* # multicast packets for Tx */
31966+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
31967 u32 multircv; /* # multicast packets received */
31968- atomic_t multixmt; /* # multicast packets for Tx */
31969+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
31970 u32 brdcstrcv; /* # broadcast packets received */
31971- atomic_t brdcstxmt; /* # broadcast packets for Tx */
31972+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
31973 u32 norcvbuf; /* # Rx packets discarded */
31974 u32 noxmtbuf; /* # Tx packets discarded */
31975
31976diff -urNp linux-3.0.4/drivers/staging/hv/channel.c linux-3.0.4/drivers/staging/hv/channel.c
31977--- linux-3.0.4/drivers/staging/hv/channel.c 2011-08-23 21:44:40.000000000 -0400
31978+++ linux-3.0.4/drivers/staging/hv/channel.c 2011-08-23 21:47:56.000000000 -0400
31979@@ -433,8 +433,8 @@ int vmbus_establish_gpadl(struct vmbus_c
31980 int ret = 0;
31981 int t;
31982
31983- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
31984- atomic_inc(&vmbus_connection.next_gpadl_handle);
31985+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
31986+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
31987
31988 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
31989 if (ret)
31990diff -urNp linux-3.0.4/drivers/staging/hv/hv.c linux-3.0.4/drivers/staging/hv/hv.c
31991--- linux-3.0.4/drivers/staging/hv/hv.c 2011-07-21 22:17:23.000000000 -0400
31992+++ linux-3.0.4/drivers/staging/hv/hv.c 2011-08-23 21:47:56.000000000 -0400
31993@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, voi
31994 u64 output_address = (output) ? virt_to_phys(output) : 0;
31995 u32 output_address_hi = output_address >> 32;
31996 u32 output_address_lo = output_address & 0xFFFFFFFF;
31997- volatile void *hypercall_page = hv_context.hypercall_page;
31998+ volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
31999
32000 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
32001 "=a"(hv_status_lo) : "d" (control_hi),
32002diff -urNp linux-3.0.4/drivers/staging/hv/hv_mouse.c linux-3.0.4/drivers/staging/hv/hv_mouse.c
32003--- linux-3.0.4/drivers/staging/hv/hv_mouse.c 2011-07-21 22:17:23.000000000 -0400
32004+++ linux-3.0.4/drivers/staging/hv/hv_mouse.c 2011-08-23 21:47:56.000000000 -0400
32005@@ -879,8 +879,10 @@ static void reportdesc_callback(struct h
32006 if (hid_dev) {
32007 DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
32008
32009- hid_dev->ll_driver->open = mousevsc_hid_open;
32010- hid_dev->ll_driver->close = mousevsc_hid_close;
32011+ pax_open_kernel();
32012+ *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open;
32013+ *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
32014+ pax_close_kernel();
32015
32016 hid_dev->bus = BUS_VIRTUAL;
32017 hid_dev->vendor = input_device_ctx->device_info.vendor;
32018diff -urNp linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h
32019--- linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h 2011-07-21 22:17:23.000000000 -0400
32020+++ linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h 2011-08-23 21:47:56.000000000 -0400
32021@@ -559,7 +559,7 @@ enum vmbus_connect_state {
32022 struct vmbus_connection {
32023 enum vmbus_connect_state conn_state;
32024
32025- atomic_t next_gpadl_handle;
32026+ atomic_unchecked_t next_gpadl_handle;
32027
32028 /*
32029 * Represents channel interrupts. Each bit position represents a
32030diff -urNp linux-3.0.4/drivers/staging/hv/rndis_filter.c linux-3.0.4/drivers/staging/hv/rndis_filter.c
32031--- linux-3.0.4/drivers/staging/hv/rndis_filter.c 2011-08-23 21:44:40.000000000 -0400
32032+++ linux-3.0.4/drivers/staging/hv/rndis_filter.c 2011-08-23 21:47:56.000000000 -0400
32033@@ -43,7 +43,7 @@ struct rndis_device {
32034
32035 enum rndis_device_state state;
32036 u32 link_stat;
32037- atomic_t new_req_id;
32038+ atomic_unchecked_t new_req_id;
32039
32040 spinlock_t request_lock;
32041 struct list_head req_list;
32042@@ -117,7 +117,7 @@ static struct rndis_request *get_rndis_r
32043 * template
32044 */
32045 set = &rndis_msg->msg.set_req;
32046- set->req_id = atomic_inc_return(&dev->new_req_id);
32047+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32048
32049 /* Add to the request list */
32050 spin_lock_irqsave(&dev->request_lock, flags);
32051@@ -637,7 +637,7 @@ static void rndis_filter_halt_device(str
32052
32053 /* Setup the rndis set */
32054 halt = &request->request_msg.msg.halt_req;
32055- halt->req_id = atomic_inc_return(&dev->new_req_id);
32056+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32057
32058 /* Ignore return since this msg is optional. */
32059 rndis_filter_send_request(dev, request);
32060diff -urNp linux-3.0.4/drivers/staging/hv/vmbus_drv.c linux-3.0.4/drivers/staging/hv/vmbus_drv.c
32061--- linux-3.0.4/drivers/staging/hv/vmbus_drv.c 2011-07-21 22:17:23.000000000 -0400
32062+++ linux-3.0.4/drivers/staging/hv/vmbus_drv.c 2011-08-23 21:47:56.000000000 -0400
32063@@ -668,11 +668,11 @@ int vmbus_child_device_register(struct h
32064 {
32065 int ret = 0;
32066
32067- static atomic_t device_num = ATOMIC_INIT(0);
32068+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
32069
32070 /* Set the device name. Otherwise, device_register() will fail. */
32071 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
32072- atomic_inc_return(&device_num));
32073+ atomic_inc_return_unchecked(&device_num));
32074
32075 /* The new device belongs to this bus */
32076 child_device_obj->device.bus = &hv_bus; /* device->dev.bus; */
32077diff -urNp linux-3.0.4/drivers/staging/iio/ring_generic.h linux-3.0.4/drivers/staging/iio/ring_generic.h
32078--- linux-3.0.4/drivers/staging/iio/ring_generic.h 2011-07-21 22:17:23.000000000 -0400
32079+++ linux-3.0.4/drivers/staging/iio/ring_generic.h 2011-08-23 21:47:56.000000000 -0400
32080@@ -62,7 +62,7 @@ struct iio_ring_access_funcs {
32081
32082 int (*is_enabled)(struct iio_ring_buffer *ring);
32083 int (*enable)(struct iio_ring_buffer *ring);
32084-};
32085+} __no_const;
32086
32087 struct iio_ring_setup_ops {
32088 int (*preenable)(struct iio_dev *);
32089diff -urNp linux-3.0.4/drivers/staging/octeon/ethernet.c linux-3.0.4/drivers/staging/octeon/ethernet.c
32090--- linux-3.0.4/drivers/staging/octeon/ethernet.c 2011-07-21 22:17:23.000000000 -0400
32091+++ linux-3.0.4/drivers/staging/octeon/ethernet.c 2011-08-23 21:47:56.000000000 -0400
32092@@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_
32093 * since the RX tasklet also increments it.
32094 */
32095 #ifdef CONFIG_64BIT
32096- atomic64_add(rx_status.dropped_packets,
32097- (atomic64_t *)&priv->stats.rx_dropped);
32098+ atomic64_add_unchecked(rx_status.dropped_packets,
32099+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32100 #else
32101- atomic_add(rx_status.dropped_packets,
32102- (atomic_t *)&priv->stats.rx_dropped);
32103+ atomic_add_unchecked(rx_status.dropped_packets,
32104+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
32105 #endif
32106 }
32107
32108diff -urNp linux-3.0.4/drivers/staging/octeon/ethernet-rx.c linux-3.0.4/drivers/staging/octeon/ethernet-rx.c
32109--- linux-3.0.4/drivers/staging/octeon/ethernet-rx.c 2011-07-21 22:17:23.000000000 -0400
32110+++ linux-3.0.4/drivers/staging/octeon/ethernet-rx.c 2011-08-23 21:47:56.000000000 -0400
32111@@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi
32112 /* Increment RX stats for virtual ports */
32113 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
32114 #ifdef CONFIG_64BIT
32115- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
32116- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
32117+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
32118+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
32119 #else
32120- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
32121- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
32122+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
32123+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
32124 #endif
32125 }
32126 netif_receive_skb(skb);
32127@@ -433,9 +433,9 @@ static int cvm_oct_napi_poll(struct napi
32128 dev->name);
32129 */
32130 #ifdef CONFIG_64BIT
32131- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
32132+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32133 #else
32134- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
32135+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
32136 #endif
32137 dev_kfree_skb_irq(skb);
32138 }
32139diff -urNp linux-3.0.4/drivers/staging/pohmelfs/inode.c linux-3.0.4/drivers/staging/pohmelfs/inode.c
32140--- linux-3.0.4/drivers/staging/pohmelfs/inode.c 2011-07-21 22:17:23.000000000 -0400
32141+++ linux-3.0.4/drivers/staging/pohmelfs/inode.c 2011-08-23 21:47:56.000000000 -0400
32142@@ -1856,7 +1856,7 @@ static int pohmelfs_fill_super(struct su
32143 mutex_init(&psb->mcache_lock);
32144 psb->mcache_root = RB_ROOT;
32145 psb->mcache_timeout = msecs_to_jiffies(5000);
32146- atomic_long_set(&psb->mcache_gen, 0);
32147+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
32148
32149 psb->trans_max_pages = 100;
32150
32151@@ -1871,7 +1871,7 @@ static int pohmelfs_fill_super(struct su
32152 INIT_LIST_HEAD(&psb->crypto_ready_list);
32153 INIT_LIST_HEAD(&psb->crypto_active_list);
32154
32155- atomic_set(&psb->trans_gen, 1);
32156+ atomic_set_unchecked(&psb->trans_gen, 1);
32157 atomic_long_set(&psb->total_inodes, 0);
32158
32159 mutex_init(&psb->state_lock);
32160diff -urNp linux-3.0.4/drivers/staging/pohmelfs/mcache.c linux-3.0.4/drivers/staging/pohmelfs/mcache.c
32161--- linux-3.0.4/drivers/staging/pohmelfs/mcache.c 2011-07-21 22:17:23.000000000 -0400
32162+++ linux-3.0.4/drivers/staging/pohmelfs/mcache.c 2011-08-23 21:47:56.000000000 -0400
32163@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
32164 m->data = data;
32165 m->start = start;
32166 m->size = size;
32167- m->gen = atomic_long_inc_return(&psb->mcache_gen);
32168+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
32169
32170 mutex_lock(&psb->mcache_lock);
32171 err = pohmelfs_mcache_insert(psb, m);
32172diff -urNp linux-3.0.4/drivers/staging/pohmelfs/netfs.h linux-3.0.4/drivers/staging/pohmelfs/netfs.h
32173--- linux-3.0.4/drivers/staging/pohmelfs/netfs.h 2011-07-21 22:17:23.000000000 -0400
32174+++ linux-3.0.4/drivers/staging/pohmelfs/netfs.h 2011-08-23 21:47:56.000000000 -0400
32175@@ -571,14 +571,14 @@ struct pohmelfs_config;
32176 struct pohmelfs_sb {
32177 struct rb_root mcache_root;
32178 struct mutex mcache_lock;
32179- atomic_long_t mcache_gen;
32180+ atomic_long_unchecked_t mcache_gen;
32181 unsigned long mcache_timeout;
32182
32183 unsigned int idx;
32184
32185 unsigned int trans_retries;
32186
32187- atomic_t trans_gen;
32188+ atomic_unchecked_t trans_gen;
32189
32190 unsigned int crypto_attached_size;
32191 unsigned int crypto_align_size;
32192diff -urNp linux-3.0.4/drivers/staging/pohmelfs/trans.c linux-3.0.4/drivers/staging/pohmelfs/trans.c
32193--- linux-3.0.4/drivers/staging/pohmelfs/trans.c 2011-07-21 22:17:23.000000000 -0400
32194+++ linux-3.0.4/drivers/staging/pohmelfs/trans.c 2011-08-23 21:47:56.000000000 -0400
32195@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
32196 int err;
32197 struct netfs_cmd *cmd = t->iovec.iov_base;
32198
32199- t->gen = atomic_inc_return(&psb->trans_gen);
32200+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
32201
32202 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
32203 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
32204diff -urNp linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h
32205--- linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h 2011-07-21 22:17:23.000000000 -0400
32206+++ linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h 2011-08-23 21:47:56.000000000 -0400
32207@@ -83,7 +83,7 @@ struct _io_ops {
32208 u8 *pmem);
32209 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
32210 u8 *pmem);
32211-};
32212+} __no_const;
32213
32214 struct io_req {
32215 struct list_head list;
32216diff -urNp linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c
32217--- linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c 2011-07-21 22:17:23.000000000 -0400
32218+++ linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c 2011-08-24 18:21:41.000000000 -0400
32219@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, s
32220 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
32221
32222 if (rlen)
32223- if (copy_to_user(data, &resp, rlen))
32224+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
32225 return -EFAULT;
32226
32227 return 0;
32228diff -urNp linux-3.0.4/drivers/staging/tty/stallion.c linux-3.0.4/drivers/staging/tty/stallion.c
32229--- linux-3.0.4/drivers/staging/tty/stallion.c 2011-07-21 22:17:23.000000000 -0400
32230+++ linux-3.0.4/drivers/staging/tty/stallion.c 2011-08-23 21:48:14.000000000 -0400
32231@@ -2406,6 +2406,8 @@ static int stl_getportstruct(struct stlp
32232 struct stlport stl_dummyport;
32233 struct stlport *portp;
32234
32235+ pax_track_stack();
32236+
32237 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
32238 return -EFAULT;
32239 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
32240diff -urNp linux-3.0.4/drivers/staging/usbip/usbip_common.h linux-3.0.4/drivers/staging/usbip/usbip_common.h
32241--- linux-3.0.4/drivers/staging/usbip/usbip_common.h 2011-07-21 22:17:23.000000000 -0400
32242+++ linux-3.0.4/drivers/staging/usbip/usbip_common.h 2011-08-23 21:47:56.000000000 -0400
32243@@ -315,7 +315,7 @@ struct usbip_device {
32244 void (*shutdown)(struct usbip_device *);
32245 void (*reset)(struct usbip_device *);
32246 void (*unusable)(struct usbip_device *);
32247- } eh_ops;
32248+ } __no_const eh_ops;
32249 };
32250
32251 void usbip_pack_pdu(struct usbip_header *pdu, struct urb *urb, int cmd,
32252diff -urNp linux-3.0.4/drivers/staging/usbip/vhci.h linux-3.0.4/drivers/staging/usbip/vhci.h
32253--- linux-3.0.4/drivers/staging/usbip/vhci.h 2011-07-21 22:17:23.000000000 -0400
32254+++ linux-3.0.4/drivers/staging/usbip/vhci.h 2011-08-23 21:47:56.000000000 -0400
32255@@ -94,7 +94,7 @@ struct vhci_hcd {
32256 unsigned resuming:1;
32257 unsigned long re_timeout;
32258
32259- atomic_t seqnum;
32260+ atomic_unchecked_t seqnum;
32261
32262 /*
32263 * NOTE:
32264diff -urNp linux-3.0.4/drivers/staging/usbip/vhci_hcd.c linux-3.0.4/drivers/staging/usbip/vhci_hcd.c
32265--- linux-3.0.4/drivers/staging/usbip/vhci_hcd.c 2011-08-23 21:44:40.000000000 -0400
32266+++ linux-3.0.4/drivers/staging/usbip/vhci_hcd.c 2011-08-23 21:47:56.000000000 -0400
32267@@ -511,7 +511,7 @@ static void vhci_tx_urb(struct urb *urb)
32268 return;
32269 }
32270
32271- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
32272+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32273 if (priv->seqnum == 0xffff)
32274 dev_info(&urb->dev->dev, "seqnum max\n");
32275
32276@@ -765,7 +765,7 @@ static int vhci_urb_dequeue(struct usb_h
32277 return -ENOMEM;
32278 }
32279
32280- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
32281+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32282 if (unlink->seqnum == 0xffff)
32283 pr_info("seqnum max\n");
32284
32285@@ -955,7 +955,7 @@ static int vhci_start(struct usb_hcd *hc
32286 vdev->rhport = rhport;
32287 }
32288
32289- atomic_set(&vhci->seqnum, 0);
32290+ atomic_set_unchecked(&vhci->seqnum, 0);
32291 spin_lock_init(&vhci->lock);
32292
32293 hcd->power_budget = 0; /* no limit */
32294diff -urNp linux-3.0.4/drivers/staging/usbip/vhci_rx.c linux-3.0.4/drivers/staging/usbip/vhci_rx.c
32295--- linux-3.0.4/drivers/staging/usbip/vhci_rx.c 2011-07-21 22:17:23.000000000 -0400
32296+++ linux-3.0.4/drivers/staging/usbip/vhci_rx.c 2011-08-23 21:47:56.000000000 -0400
32297@@ -76,7 +76,7 @@ static void vhci_recv_ret_submit(struct
32298 if (!urb) {
32299 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
32300 pr_info("max seqnum %d\n",
32301- atomic_read(&the_controller->seqnum));
32302+ atomic_read_unchecked(&the_controller->seqnum));
32303 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
32304 return;
32305 }
32306diff -urNp linux-3.0.4/drivers/staging/vt6655/hostap.c linux-3.0.4/drivers/staging/vt6655/hostap.c
32307--- linux-3.0.4/drivers/staging/vt6655/hostap.c 2011-07-21 22:17:23.000000000 -0400
32308+++ linux-3.0.4/drivers/staging/vt6655/hostap.c 2011-08-23 21:47:56.000000000 -0400
32309@@ -79,14 +79,13 @@ static int msglevel
32310 *
32311 */
32312
32313+static net_device_ops_no_const apdev_netdev_ops;
32314+
32315 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
32316 {
32317 PSDevice apdev_priv;
32318 struct net_device *dev = pDevice->dev;
32319 int ret;
32320- const struct net_device_ops apdev_netdev_ops = {
32321- .ndo_start_xmit = pDevice->tx_80211,
32322- };
32323
32324 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
32325
32326@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevic
32327 *apdev_priv = *pDevice;
32328 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
32329
32330+ /* only half broken now */
32331+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
32332 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
32333
32334 pDevice->apdev->type = ARPHRD_IEEE80211;
32335diff -urNp linux-3.0.4/drivers/staging/vt6656/hostap.c linux-3.0.4/drivers/staging/vt6656/hostap.c
32336--- linux-3.0.4/drivers/staging/vt6656/hostap.c 2011-07-21 22:17:23.000000000 -0400
32337+++ linux-3.0.4/drivers/staging/vt6656/hostap.c 2011-08-23 21:47:56.000000000 -0400
32338@@ -80,14 +80,13 @@ static int msglevel
32339 *
32340 */
32341
32342+static net_device_ops_no_const apdev_netdev_ops;
32343+
32344 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
32345 {
32346 PSDevice apdev_priv;
32347 struct net_device *dev = pDevice->dev;
32348 int ret;
32349- const struct net_device_ops apdev_netdev_ops = {
32350- .ndo_start_xmit = pDevice->tx_80211,
32351- };
32352
32353 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
32354
32355@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevic
32356 *apdev_priv = *pDevice;
32357 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
32358
32359+ /* only half broken now */
32360+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
32361 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
32362
32363 pDevice->apdev->type = ARPHRD_IEEE80211;
32364diff -urNp linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c
32365--- linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c 2011-07-21 22:17:23.000000000 -0400
32366+++ linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c 2011-08-23 21:47:56.000000000 -0400
32367@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hf
32368
32369 struct usbctlx_completor {
32370 int (*complete) (struct usbctlx_completor *);
32371-};
32372+} __no_const;
32373
32374 static int
32375 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
32376diff -urNp linux-3.0.4/drivers/staging/zcache/tmem.c linux-3.0.4/drivers/staging/zcache/tmem.c
32377--- linux-3.0.4/drivers/staging/zcache/tmem.c 2011-07-21 22:17:23.000000000 -0400
32378+++ linux-3.0.4/drivers/staging/zcache/tmem.c 2011-08-23 21:47:56.000000000 -0400
32379@@ -39,7 +39,7 @@
32380 * A tmem host implementation must use this function to register callbacks
32381 * for memory allocation.
32382 */
32383-static struct tmem_hostops tmem_hostops;
32384+static tmem_hostops_no_const tmem_hostops;
32385
32386 static void tmem_objnode_tree_init(void);
32387
32388@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_h
32389 * A tmem host implementation must use this function to register
32390 * callbacks for a page-accessible memory (PAM) implementation
32391 */
32392-static struct tmem_pamops tmem_pamops;
32393+static tmem_pamops_no_const tmem_pamops;
32394
32395 void tmem_register_pamops(struct tmem_pamops *m)
32396 {
32397diff -urNp linux-3.0.4/drivers/staging/zcache/tmem.h linux-3.0.4/drivers/staging/zcache/tmem.h
32398--- linux-3.0.4/drivers/staging/zcache/tmem.h 2011-07-21 22:17:23.000000000 -0400
32399+++ linux-3.0.4/drivers/staging/zcache/tmem.h 2011-08-23 21:47:56.000000000 -0400
32400@@ -171,6 +171,7 @@ struct tmem_pamops {
32401 int (*get_data)(struct page *, void *, struct tmem_pool *);
32402 void (*free)(void *, struct tmem_pool *);
32403 };
32404+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
32405 extern void tmem_register_pamops(struct tmem_pamops *m);
32406
32407 /* memory allocation methods provided by the host implementation */
32408@@ -180,6 +181,7 @@ struct tmem_hostops {
32409 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
32410 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
32411 };
32412+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
32413 extern void tmem_register_hostops(struct tmem_hostops *m);
32414
32415 /* core tmem accessor functions */
32416diff -urNp linux-3.0.4/drivers/target/target_core_alua.c linux-3.0.4/drivers/target/target_core_alua.c
32417--- linux-3.0.4/drivers/target/target_core_alua.c 2011-07-21 22:17:23.000000000 -0400
32418+++ linux-3.0.4/drivers/target/target_core_alua.c 2011-08-23 21:48:14.000000000 -0400
32419@@ -675,6 +675,8 @@ static int core_alua_update_tpg_primary_
32420 char path[ALUA_METADATA_PATH_LEN];
32421 int len;
32422
32423+ pax_track_stack();
32424+
32425 memset(path, 0, ALUA_METADATA_PATH_LEN);
32426
32427 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
32428@@ -938,6 +940,8 @@ static int core_alua_update_tpg_secondar
32429 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
32430 int len;
32431
32432+ pax_track_stack();
32433+
32434 memset(path, 0, ALUA_METADATA_PATH_LEN);
32435 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
32436
32437diff -urNp linux-3.0.4/drivers/target/target_core_cdb.c linux-3.0.4/drivers/target/target_core_cdb.c
32438--- linux-3.0.4/drivers/target/target_core_cdb.c 2011-07-21 22:17:23.000000000 -0400
32439+++ linux-3.0.4/drivers/target/target_core_cdb.c 2011-08-23 21:48:14.000000000 -0400
32440@@ -838,6 +838,8 @@ target_emulate_modesense(struct se_cmd *
32441 int length = 0;
32442 unsigned char buf[SE_MODE_PAGE_BUF];
32443
32444+ pax_track_stack();
32445+
32446 memset(buf, 0, SE_MODE_PAGE_BUF);
32447
32448 switch (cdb[2] & 0x3f) {
32449diff -urNp linux-3.0.4/drivers/target/target_core_configfs.c linux-3.0.4/drivers/target/target_core_configfs.c
32450--- linux-3.0.4/drivers/target/target_core_configfs.c 2011-07-21 22:17:23.000000000 -0400
32451+++ linux-3.0.4/drivers/target/target_core_configfs.c 2011-08-23 21:48:14.000000000 -0400
32452@@ -1276,6 +1276,8 @@ static ssize_t target_core_dev_pr_show_a
32453 ssize_t len = 0;
32454 int reg_count = 0, prf_isid;
32455
32456+ pax_track_stack();
32457+
32458 if (!(su_dev->se_dev_ptr))
32459 return -ENODEV;
32460
32461diff -urNp linux-3.0.4/drivers/target/target_core_pr.c linux-3.0.4/drivers/target/target_core_pr.c
32462--- linux-3.0.4/drivers/target/target_core_pr.c 2011-07-21 22:17:23.000000000 -0400
32463+++ linux-3.0.4/drivers/target/target_core_pr.c 2011-08-23 21:48:14.000000000 -0400
32464@@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_regi
32465 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
32466 u16 tpgt;
32467
32468+ pax_track_stack();
32469+
32470 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
32471 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
32472 /*
32473@@ -1861,6 +1863,8 @@ static int __core_scsi3_update_aptpl_buf
32474 ssize_t len = 0;
32475 int reg_count = 0;
32476
32477+ pax_track_stack();
32478+
32479 memset(buf, 0, pr_aptpl_buf_len);
32480 /*
32481 * Called to clear metadata once APTPL has been deactivated.
32482@@ -1983,6 +1987,8 @@ static int __core_scsi3_write_aptpl_to_f
32483 char path[512];
32484 int ret;
32485
32486+ pax_track_stack();
32487+
32488 memset(iov, 0, sizeof(struct iovec));
32489 memset(path, 0, 512);
32490
32491diff -urNp linux-3.0.4/drivers/target/target_core_tmr.c linux-3.0.4/drivers/target/target_core_tmr.c
32492--- linux-3.0.4/drivers/target/target_core_tmr.c 2011-07-21 22:17:23.000000000 -0400
32493+++ linux-3.0.4/drivers/target/target_core_tmr.c 2011-08-23 21:47:56.000000000 -0400
32494@@ -269,7 +269,7 @@ int core_tmr_lun_reset(
32495 CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
32496 T_TASK(cmd)->t_task_cdbs,
32497 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32498- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32499+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32500 atomic_read(&T_TASK(cmd)->t_transport_active),
32501 atomic_read(&T_TASK(cmd)->t_transport_stop),
32502 atomic_read(&T_TASK(cmd)->t_transport_sent));
32503@@ -311,7 +311,7 @@ int core_tmr_lun_reset(
32504 DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
32505 " task: %p, t_fe_count: %d dev: %p\n", task,
32506 fe_count, dev);
32507- atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
32508+ atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
32509 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
32510 flags);
32511 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
32512@@ -321,7 +321,7 @@ int core_tmr_lun_reset(
32513 }
32514 DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
32515 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
32516- atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
32517+ atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
32518 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
32519 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
32520
32521diff -urNp linux-3.0.4/drivers/target/target_core_transport.c linux-3.0.4/drivers/target/target_core_transport.c
32522--- linux-3.0.4/drivers/target/target_core_transport.c 2011-07-21 22:17:23.000000000 -0400
32523+++ linux-3.0.4/drivers/target/target_core_transport.c 2011-08-23 21:47:56.000000000 -0400
32524@@ -1681,7 +1681,7 @@ struct se_device *transport_add_device_t
32525
32526 dev->queue_depth = dev_limits->queue_depth;
32527 atomic_set(&dev->depth_left, dev->queue_depth);
32528- atomic_set(&dev->dev_ordered_id, 0);
32529+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
32530
32531 se_dev_set_default_attribs(dev, dev_limits);
32532
32533@@ -1882,7 +1882,7 @@ static int transport_check_alloc_task_at
32534 * Used to determine when ORDERED commands should go from
32535 * Dormant to Active status.
32536 */
32537- cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
32538+ cmd->se_ordered_id = atomic_inc_return_unchecked(&SE_DEV(cmd)->dev_ordered_id);
32539 smp_mb__after_atomic_inc();
32540 DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
32541 cmd->se_ordered_id, cmd->sam_task_attr,
32542@@ -2169,7 +2169,7 @@ static void transport_generic_request_fa
32543 " t_transport_active: %d t_transport_stop: %d"
32544 " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
32545 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32546- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32547+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32548 atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
32549 atomic_read(&T_TASK(cmd)->t_transport_active),
32550 atomic_read(&T_TASK(cmd)->t_transport_stop),
32551@@ -2673,9 +2673,9 @@ check_depth:
32552 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
32553 atomic_set(&task->task_active, 1);
32554 atomic_set(&task->task_sent, 1);
32555- atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
32556+ atomic_inc_unchecked(&T_TASK(cmd)->t_task_cdbs_sent);
32557
32558- if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
32559+ if (atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent) ==
32560 T_TASK(cmd)->t_task_cdbs)
32561 atomic_set(&cmd->transport_sent, 1);
32562
32563@@ -5568,7 +5568,7 @@ static void transport_generic_wait_for_t
32564 atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
32565 }
32566 if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
32567- atomic_read(&T_TASK(cmd)->t_transport_aborted))
32568+ atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted))
32569 goto remove;
32570
32571 atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
32572@@ -5797,7 +5797,7 @@ int transport_check_aborted_status(struc
32573 {
32574 int ret = 0;
32575
32576- if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
32577+ if (atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted) != 0) {
32578 if (!(send_status) ||
32579 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
32580 return 1;
32581@@ -5825,7 +5825,7 @@ void transport_send_task_abort(struct se
32582 */
32583 if (cmd->data_direction == DMA_TO_DEVICE) {
32584 if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
32585- atomic_inc(&T_TASK(cmd)->t_transport_aborted);
32586+ atomic_inc_unchecked(&T_TASK(cmd)->t_transport_aborted);
32587 smp_mb__after_atomic_inc();
32588 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
32589 transport_new_cmd_failure(cmd);
32590@@ -5949,7 +5949,7 @@ static void transport_processing_shutdow
32591 CMD_TFO(cmd)->get_task_tag(cmd),
32592 T_TASK(cmd)->t_task_cdbs,
32593 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32594- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32595+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32596 atomic_read(&T_TASK(cmd)->t_transport_active),
32597 atomic_read(&T_TASK(cmd)->t_transport_stop),
32598 atomic_read(&T_TASK(cmd)->t_transport_sent));
32599diff -urNp linux-3.0.4/drivers/telephony/ixj.c linux-3.0.4/drivers/telephony/ixj.c
32600--- linux-3.0.4/drivers/telephony/ixj.c 2011-07-21 22:17:23.000000000 -0400
32601+++ linux-3.0.4/drivers/telephony/ixj.c 2011-08-23 21:48:14.000000000 -0400
32602@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
32603 bool mContinue;
32604 char *pIn, *pOut;
32605
32606+ pax_track_stack();
32607+
32608 if (!SCI_Prepare(j))
32609 return 0;
32610
32611diff -urNp linux-3.0.4/drivers/tty/hvc/hvcs.c linux-3.0.4/drivers/tty/hvc/hvcs.c
32612--- linux-3.0.4/drivers/tty/hvc/hvcs.c 2011-07-21 22:17:23.000000000 -0400
32613+++ linux-3.0.4/drivers/tty/hvc/hvcs.c 2011-08-23 21:47:56.000000000 -0400
32614@@ -83,6 +83,7 @@
32615 #include <asm/hvcserver.h>
32616 #include <asm/uaccess.h>
32617 #include <asm/vio.h>
32618+#include <asm/local.h>
32619
32620 /*
32621 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
32622@@ -270,7 +271,7 @@ struct hvcs_struct {
32623 unsigned int index;
32624
32625 struct tty_struct *tty;
32626- int open_count;
32627+ local_t open_count;
32628
32629 /*
32630 * Used to tell the driver kernel_thread what operations need to take
32631@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(st
32632
32633 spin_lock_irqsave(&hvcsd->lock, flags);
32634
32635- if (hvcsd->open_count > 0) {
32636+ if (local_read(&hvcsd->open_count) > 0) {
32637 spin_unlock_irqrestore(&hvcsd->lock, flags);
32638 printk(KERN_INFO "HVCS: vterm state unchanged. "
32639 "The hvcs device node is still in use.\n");
32640@@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *
32641 if ((retval = hvcs_partner_connect(hvcsd)))
32642 goto error_release;
32643
32644- hvcsd->open_count = 1;
32645+ local_set(&hvcsd->open_count, 1);
32646 hvcsd->tty = tty;
32647 tty->driver_data = hvcsd;
32648
32649@@ -1179,7 +1180,7 @@ fast_open:
32650
32651 spin_lock_irqsave(&hvcsd->lock, flags);
32652 kref_get(&hvcsd->kref);
32653- hvcsd->open_count++;
32654+ local_inc(&hvcsd->open_count);
32655 hvcsd->todo_mask |= HVCS_SCHED_READ;
32656 spin_unlock_irqrestore(&hvcsd->lock, flags);
32657
32658@@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct
32659 hvcsd = tty->driver_data;
32660
32661 spin_lock_irqsave(&hvcsd->lock, flags);
32662- if (--hvcsd->open_count == 0) {
32663+ if (local_dec_and_test(&hvcsd->open_count)) {
32664
32665 vio_disable_interrupts(hvcsd->vdev);
32666
32667@@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct
32668 free_irq(irq, hvcsd);
32669 kref_put(&hvcsd->kref, destroy_hvcs_struct);
32670 return;
32671- } else if (hvcsd->open_count < 0) {
32672+ } else if (local_read(&hvcsd->open_count) < 0) {
32673 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
32674 " is missmanaged.\n",
32675- hvcsd->vdev->unit_address, hvcsd->open_count);
32676+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
32677 }
32678
32679 spin_unlock_irqrestore(&hvcsd->lock, flags);
32680@@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struc
32681
32682 spin_lock_irqsave(&hvcsd->lock, flags);
32683 /* Preserve this so that we know how many kref refs to put */
32684- temp_open_count = hvcsd->open_count;
32685+ temp_open_count = local_read(&hvcsd->open_count);
32686
32687 /*
32688 * Don't kref put inside the spinlock because the destruction
32689@@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struc
32690 hvcsd->tty->driver_data = NULL;
32691 hvcsd->tty = NULL;
32692
32693- hvcsd->open_count = 0;
32694+ local_set(&hvcsd->open_count, 0);
32695
32696 /* This will drop any buffered data on the floor which is OK in a hangup
32697 * scenario. */
32698@@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct
32699 * the middle of a write operation? This is a crummy place to do this
32700 * but we want to keep it all in the spinlock.
32701 */
32702- if (hvcsd->open_count <= 0) {
32703+ if (local_read(&hvcsd->open_count) <= 0) {
32704 spin_unlock_irqrestore(&hvcsd->lock, flags);
32705 return -ENODEV;
32706 }
32707@@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_st
32708 {
32709 struct hvcs_struct *hvcsd = tty->driver_data;
32710
32711- if (!hvcsd || hvcsd->open_count <= 0)
32712+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
32713 return 0;
32714
32715 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
32716diff -urNp linux-3.0.4/drivers/tty/ipwireless/tty.c linux-3.0.4/drivers/tty/ipwireless/tty.c
32717--- linux-3.0.4/drivers/tty/ipwireless/tty.c 2011-07-21 22:17:23.000000000 -0400
32718+++ linux-3.0.4/drivers/tty/ipwireless/tty.c 2011-08-23 21:47:56.000000000 -0400
32719@@ -29,6 +29,7 @@
32720 #include <linux/tty_driver.h>
32721 #include <linux/tty_flip.h>
32722 #include <linux/uaccess.h>
32723+#include <asm/local.h>
32724
32725 #include "tty.h"
32726 #include "network.h"
32727@@ -51,7 +52,7 @@ struct ipw_tty {
32728 int tty_type;
32729 struct ipw_network *network;
32730 struct tty_struct *linux_tty;
32731- int open_count;
32732+ local_t open_count;
32733 unsigned int control_lines;
32734 struct mutex ipw_tty_mutex;
32735 int tx_bytes_queued;
32736@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
32737 mutex_unlock(&tty->ipw_tty_mutex);
32738 return -ENODEV;
32739 }
32740- if (tty->open_count == 0)
32741+ if (local_read(&tty->open_count) == 0)
32742 tty->tx_bytes_queued = 0;
32743
32744- tty->open_count++;
32745+ local_inc(&tty->open_count);
32746
32747 tty->linux_tty = linux_tty;
32748 linux_tty->driver_data = tty;
32749@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
32750
32751 static void do_ipw_close(struct ipw_tty *tty)
32752 {
32753- tty->open_count--;
32754-
32755- if (tty->open_count == 0) {
32756+ if (local_dec_return(&tty->open_count) == 0) {
32757 struct tty_struct *linux_tty = tty->linux_tty;
32758
32759 if (linux_tty != NULL) {
32760@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
32761 return;
32762
32763 mutex_lock(&tty->ipw_tty_mutex);
32764- if (tty->open_count == 0) {
32765+ if (local_read(&tty->open_count) == 0) {
32766 mutex_unlock(&tty->ipw_tty_mutex);
32767 return;
32768 }
32769@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
32770 return;
32771 }
32772
32773- if (!tty->open_count) {
32774+ if (!local_read(&tty->open_count)) {
32775 mutex_unlock(&tty->ipw_tty_mutex);
32776 return;
32777 }
32778@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
32779 return -ENODEV;
32780
32781 mutex_lock(&tty->ipw_tty_mutex);
32782- if (!tty->open_count) {
32783+ if (!local_read(&tty->open_count)) {
32784 mutex_unlock(&tty->ipw_tty_mutex);
32785 return -EINVAL;
32786 }
32787@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
32788 if (!tty)
32789 return -ENODEV;
32790
32791- if (!tty->open_count)
32792+ if (!local_read(&tty->open_count))
32793 return -EINVAL;
32794
32795 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
32796@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
32797 if (!tty)
32798 return 0;
32799
32800- if (!tty->open_count)
32801+ if (!local_read(&tty->open_count))
32802 return 0;
32803
32804 return tty->tx_bytes_queued;
32805@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
32806 if (!tty)
32807 return -ENODEV;
32808
32809- if (!tty->open_count)
32810+ if (!local_read(&tty->open_count))
32811 return -EINVAL;
32812
32813 return get_control_lines(tty);
32814@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
32815 if (!tty)
32816 return -ENODEV;
32817
32818- if (!tty->open_count)
32819+ if (!local_read(&tty->open_count))
32820 return -EINVAL;
32821
32822 return set_control_lines(tty, set, clear);
32823@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
32824 if (!tty)
32825 return -ENODEV;
32826
32827- if (!tty->open_count)
32828+ if (!local_read(&tty->open_count))
32829 return -EINVAL;
32830
32831 /* FIXME: Exactly how is the tty object locked here .. */
32832@@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty
32833 against a parallel ioctl etc */
32834 mutex_lock(&ttyj->ipw_tty_mutex);
32835 }
32836- while (ttyj->open_count)
32837+ while (local_read(&ttyj->open_count))
32838 do_ipw_close(ttyj);
32839 ipwireless_disassociate_network_ttys(network,
32840 ttyj->channel_idx);
32841diff -urNp linux-3.0.4/drivers/tty/n_gsm.c linux-3.0.4/drivers/tty/n_gsm.c
32842--- linux-3.0.4/drivers/tty/n_gsm.c 2011-08-23 21:44:40.000000000 -0400
32843+++ linux-3.0.4/drivers/tty/n_gsm.c 2011-08-23 21:47:56.000000000 -0400
32844@@ -1589,7 +1589,7 @@ static struct gsm_dlci *gsm_dlci_alloc(s
32845 return NULL;
32846 spin_lock_init(&dlci->lock);
32847 dlci->fifo = &dlci->_fifo;
32848- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
32849+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
32850 kfree(dlci);
32851 return NULL;
32852 }
32853diff -urNp linux-3.0.4/drivers/tty/n_tty.c linux-3.0.4/drivers/tty/n_tty.c
32854--- linux-3.0.4/drivers/tty/n_tty.c 2011-07-21 22:17:23.000000000 -0400
32855+++ linux-3.0.4/drivers/tty/n_tty.c 2011-08-23 21:47:56.000000000 -0400
32856@@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
32857 {
32858 *ops = tty_ldisc_N_TTY;
32859 ops->owner = NULL;
32860- ops->refcount = ops->flags = 0;
32861+ atomic_set(&ops->refcount, 0);
32862+ ops->flags = 0;
32863 }
32864 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
32865diff -urNp linux-3.0.4/drivers/tty/pty.c linux-3.0.4/drivers/tty/pty.c
32866--- linux-3.0.4/drivers/tty/pty.c 2011-07-21 22:17:23.000000000 -0400
32867+++ linux-3.0.4/drivers/tty/pty.c 2011-08-23 21:47:56.000000000 -0400
32868@@ -754,8 +754,10 @@ static void __init unix98_pty_init(void)
32869 register_sysctl_table(pty_root_table);
32870
32871 /* Now create the /dev/ptmx special device */
32872+ pax_open_kernel();
32873 tty_default_fops(&ptmx_fops);
32874- ptmx_fops.open = ptmx_open;
32875+ *(void **)&ptmx_fops.open = ptmx_open;
32876+ pax_close_kernel();
32877
32878 cdev_init(&ptmx_cdev, &ptmx_fops);
32879 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
32880diff -urNp linux-3.0.4/drivers/tty/rocket.c linux-3.0.4/drivers/tty/rocket.c
32881--- linux-3.0.4/drivers/tty/rocket.c 2011-07-21 22:17:23.000000000 -0400
32882+++ linux-3.0.4/drivers/tty/rocket.c 2011-08-23 21:48:14.000000000 -0400
32883@@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info
32884 struct rocket_ports tmp;
32885 int board;
32886
32887+ pax_track_stack();
32888+
32889 if (!retports)
32890 return -EFAULT;
32891 memset(&tmp, 0, sizeof (tmp));
32892diff -urNp linux-3.0.4/drivers/tty/serial/kgdboc.c linux-3.0.4/drivers/tty/serial/kgdboc.c
32893--- linux-3.0.4/drivers/tty/serial/kgdboc.c 2011-07-21 22:17:23.000000000 -0400
32894+++ linux-3.0.4/drivers/tty/serial/kgdboc.c 2011-08-23 21:47:56.000000000 -0400
32895@@ -23,8 +23,9 @@
32896 #define MAX_CONFIG_LEN 40
32897
32898 static struct kgdb_io kgdboc_io_ops;
32899+static struct kgdb_io kgdboc_io_ops_console;
32900
32901-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
32902+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
32903 static int configured = -1;
32904
32905 static char config[MAX_CONFIG_LEN];
32906@@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
32907 kgdboc_unregister_kbd();
32908 if (configured == 1)
32909 kgdb_unregister_io_module(&kgdboc_io_ops);
32910+ else if (configured == 2)
32911+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
32912 }
32913
32914 static int configure_kgdboc(void)
32915@@ -156,13 +159,13 @@ static int configure_kgdboc(void)
32916 int err;
32917 char *cptr = config;
32918 struct console *cons;
32919+ int is_console = 0;
32920
32921 err = kgdboc_option_setup(config);
32922 if (err || !strlen(config) || isspace(config[0]))
32923 goto noconfig;
32924
32925 err = -ENODEV;
32926- kgdboc_io_ops.is_console = 0;
32927 kgdb_tty_driver = NULL;
32928
32929 kgdboc_use_kms = 0;
32930@@ -183,7 +186,7 @@ static int configure_kgdboc(void)
32931 int idx;
32932 if (cons->device && cons->device(cons, &idx) == p &&
32933 idx == tty_line) {
32934- kgdboc_io_ops.is_console = 1;
32935+ is_console = 1;
32936 break;
32937 }
32938 cons = cons->next;
32939@@ -193,12 +196,16 @@ static int configure_kgdboc(void)
32940 kgdb_tty_line = tty_line;
32941
32942 do_register:
32943- err = kgdb_register_io_module(&kgdboc_io_ops);
32944+ if (is_console) {
32945+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
32946+ configured = 2;
32947+ } else {
32948+ err = kgdb_register_io_module(&kgdboc_io_ops);
32949+ configured = 1;
32950+ }
32951 if (err)
32952 goto noconfig;
32953
32954- configured = 1;
32955-
32956 return 0;
32957
32958 noconfig:
32959@@ -212,7 +219,7 @@ noconfig:
32960 static int __init init_kgdboc(void)
32961 {
32962 /* Already configured? */
32963- if (configured == 1)
32964+ if (configured >= 1)
32965 return 0;
32966
32967 return configure_kgdboc();
32968@@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const ch
32969 if (config[len - 1] == '\n')
32970 config[len - 1] = '\0';
32971
32972- if (configured == 1)
32973+ if (configured >= 1)
32974 cleanup_kgdboc();
32975
32976 /* Go and configure with the new params. */
32977@@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
32978 .post_exception = kgdboc_post_exp_handler,
32979 };
32980
32981+static struct kgdb_io kgdboc_io_ops_console = {
32982+ .name = "kgdboc",
32983+ .read_char = kgdboc_get_char,
32984+ .write_char = kgdboc_put_char,
32985+ .pre_exception = kgdboc_pre_exp_handler,
32986+ .post_exception = kgdboc_post_exp_handler,
32987+ .is_console = 1
32988+};
32989+
32990 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
32991 /* This is only available if kgdboc is a built in for early debugging */
32992 static int __init kgdboc_early_init(char *opt)
32993diff -urNp linux-3.0.4/drivers/tty/serial/mrst_max3110.c linux-3.0.4/drivers/tty/serial/mrst_max3110.c
32994--- linux-3.0.4/drivers/tty/serial/mrst_max3110.c 2011-07-21 22:17:23.000000000 -0400
32995+++ linux-3.0.4/drivers/tty/serial/mrst_max3110.c 2011-08-23 21:48:14.000000000 -0400
32996@@ -393,6 +393,8 @@ static void max3110_con_receive(struct u
32997 int loop = 1, num, total = 0;
32998 u8 recv_buf[512], *pbuf;
32999
33000+ pax_track_stack();
33001+
33002 pbuf = recv_buf;
33003 do {
33004 num = max3110_read_multi(max, pbuf);
33005diff -urNp linux-3.0.4/drivers/tty/tty_io.c linux-3.0.4/drivers/tty/tty_io.c
33006--- linux-3.0.4/drivers/tty/tty_io.c 2011-07-21 22:17:23.000000000 -0400
33007+++ linux-3.0.4/drivers/tty/tty_io.c 2011-08-23 21:47:56.000000000 -0400
33008@@ -3215,7 +3215,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
33009
33010 void tty_default_fops(struct file_operations *fops)
33011 {
33012- *fops = tty_fops;
33013+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
33014 }
33015
33016 /*
33017diff -urNp linux-3.0.4/drivers/tty/tty_ldisc.c linux-3.0.4/drivers/tty/tty_ldisc.c
33018--- linux-3.0.4/drivers/tty/tty_ldisc.c 2011-07-21 22:17:23.000000000 -0400
33019+++ linux-3.0.4/drivers/tty/tty_ldisc.c 2011-08-23 21:47:56.000000000 -0400
33020@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
33021 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
33022 struct tty_ldisc_ops *ldo = ld->ops;
33023
33024- ldo->refcount--;
33025+ atomic_dec(&ldo->refcount);
33026 module_put(ldo->owner);
33027 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33028
33029@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
33030 spin_lock_irqsave(&tty_ldisc_lock, flags);
33031 tty_ldiscs[disc] = new_ldisc;
33032 new_ldisc->num = disc;
33033- new_ldisc->refcount = 0;
33034+ atomic_set(&new_ldisc->refcount, 0);
33035 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33036
33037 return ret;
33038@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
33039 return -EINVAL;
33040
33041 spin_lock_irqsave(&tty_ldisc_lock, flags);
33042- if (tty_ldiscs[disc]->refcount)
33043+ if (atomic_read(&tty_ldiscs[disc]->refcount))
33044 ret = -EBUSY;
33045 else
33046 tty_ldiscs[disc] = NULL;
33047@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
33048 if (ldops) {
33049 ret = ERR_PTR(-EAGAIN);
33050 if (try_module_get(ldops->owner)) {
33051- ldops->refcount++;
33052+ atomic_inc(&ldops->refcount);
33053 ret = ldops;
33054 }
33055 }
33056@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
33057 unsigned long flags;
33058
33059 spin_lock_irqsave(&tty_ldisc_lock, flags);
33060- ldops->refcount--;
33061+ atomic_dec(&ldops->refcount);
33062 module_put(ldops->owner);
33063 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33064 }
33065diff -urNp linux-3.0.4/drivers/tty/vt/keyboard.c linux-3.0.4/drivers/tty/vt/keyboard.c
33066--- linux-3.0.4/drivers/tty/vt/keyboard.c 2011-07-21 22:17:23.000000000 -0400
33067+++ linux-3.0.4/drivers/tty/vt/keyboard.c 2011-08-23 21:48:14.000000000 -0400
33068@@ -656,6 +656,16 @@ static void k_spec(struct vc_data *vc, u
33069 kbd->kbdmode == VC_OFF) &&
33070 value != KVAL(K_SAK))
33071 return; /* SAK is allowed even in raw mode */
33072+
33073+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
33074+ {
33075+ void *func = fn_handler[value];
33076+ if (func == fn_show_state || func == fn_show_ptregs ||
33077+ func == fn_show_mem)
33078+ return;
33079+ }
33080+#endif
33081+
33082 fn_handler[value](vc);
33083 }
33084
33085diff -urNp linux-3.0.4/drivers/tty/vt/vt.c linux-3.0.4/drivers/tty/vt/vt.c
33086--- linux-3.0.4/drivers/tty/vt/vt.c 2011-07-21 22:17:23.000000000 -0400
33087+++ linux-3.0.4/drivers/tty/vt/vt.c 2011-08-23 21:47:56.000000000 -0400
33088@@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
33089
33090 static void notify_write(struct vc_data *vc, unsigned int unicode)
33091 {
33092- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
33093+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
33094 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
33095 }
33096
33097diff -urNp linux-3.0.4/drivers/tty/vt/vt_ioctl.c linux-3.0.4/drivers/tty/vt/vt_ioctl.c
33098--- linux-3.0.4/drivers/tty/vt/vt_ioctl.c 2011-07-21 22:17:23.000000000 -0400
33099+++ linux-3.0.4/drivers/tty/vt/vt_ioctl.c 2011-08-23 21:48:14.000000000 -0400
33100@@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33101 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
33102 return -EFAULT;
33103
33104- if (!capable(CAP_SYS_TTY_CONFIG))
33105- perm = 0;
33106-
33107 switch (cmd) {
33108 case KDGKBENT:
33109 key_map = key_maps[s];
33110@@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33111 val = (i ? K_HOLE : K_NOSUCHMAP);
33112 return put_user(val, &user_kbe->kb_value);
33113 case KDSKBENT:
33114+ if (!capable(CAP_SYS_TTY_CONFIG))
33115+ perm = 0;
33116+
33117 if (!perm)
33118 return -EPERM;
33119 if (!i && v == K_NOSUCHMAP) {
33120@@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
33121 int i, j, k;
33122 int ret;
33123
33124- if (!capable(CAP_SYS_TTY_CONFIG))
33125- perm = 0;
33126-
33127 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
33128 if (!kbs) {
33129 ret = -ENOMEM;
33130@@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
33131 kfree(kbs);
33132 return ((p && *p) ? -EOVERFLOW : 0);
33133 case KDSKBSENT:
33134+ if (!capable(CAP_SYS_TTY_CONFIG))
33135+ perm = 0;
33136+
33137 if (!perm) {
33138 ret = -EPERM;
33139 goto reterr;
33140diff -urNp linux-3.0.4/drivers/uio/uio.c linux-3.0.4/drivers/uio/uio.c
33141--- linux-3.0.4/drivers/uio/uio.c 2011-07-21 22:17:23.000000000 -0400
33142+++ linux-3.0.4/drivers/uio/uio.c 2011-08-23 21:47:56.000000000 -0400
33143@@ -25,6 +25,7 @@
33144 #include <linux/kobject.h>
33145 #include <linux/cdev.h>
33146 #include <linux/uio_driver.h>
33147+#include <asm/local.h>
33148
33149 #define UIO_MAX_DEVICES (1U << MINORBITS)
33150
33151@@ -32,10 +33,10 @@ struct uio_device {
33152 struct module *owner;
33153 struct device *dev;
33154 int minor;
33155- atomic_t event;
33156+ atomic_unchecked_t event;
33157 struct fasync_struct *async_queue;
33158 wait_queue_head_t wait;
33159- int vma_count;
33160+ local_t vma_count;
33161 struct uio_info *info;
33162 struct kobject *map_dir;
33163 struct kobject *portio_dir;
33164@@ -242,7 +243,7 @@ static ssize_t show_event(struct device
33165 struct device_attribute *attr, char *buf)
33166 {
33167 struct uio_device *idev = dev_get_drvdata(dev);
33168- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
33169+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
33170 }
33171
33172 static struct device_attribute uio_class_attributes[] = {
33173@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *i
33174 {
33175 struct uio_device *idev = info->uio_dev;
33176
33177- atomic_inc(&idev->event);
33178+ atomic_inc_unchecked(&idev->event);
33179 wake_up_interruptible(&idev->wait);
33180 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
33181 }
33182@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode,
33183 }
33184
33185 listener->dev = idev;
33186- listener->event_count = atomic_read(&idev->event);
33187+ listener->event_count = atomic_read_unchecked(&idev->event);
33188 filep->private_data = listener;
33189
33190 if (idev->info->open) {
33191@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file
33192 return -EIO;
33193
33194 poll_wait(filep, &idev->wait, wait);
33195- if (listener->event_count != atomic_read(&idev->event))
33196+ if (listener->event_count != atomic_read_unchecked(&idev->event))
33197 return POLLIN | POLLRDNORM;
33198 return 0;
33199 }
33200@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *fil
33201 do {
33202 set_current_state(TASK_INTERRUPTIBLE);
33203
33204- event_count = atomic_read(&idev->event);
33205+ event_count = atomic_read_unchecked(&idev->event);
33206 if (event_count != listener->event_count) {
33207 if (copy_to_user(buf, &event_count, count))
33208 retval = -EFAULT;
33209@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_
33210 static void uio_vma_open(struct vm_area_struct *vma)
33211 {
33212 struct uio_device *idev = vma->vm_private_data;
33213- idev->vma_count++;
33214+ local_inc(&idev->vma_count);
33215 }
33216
33217 static void uio_vma_close(struct vm_area_struct *vma)
33218 {
33219 struct uio_device *idev = vma->vm_private_data;
33220- idev->vma_count--;
33221+ local_dec(&idev->vma_count);
33222 }
33223
33224 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33225@@ -823,7 +824,7 @@ int __uio_register_device(struct module
33226 idev->owner = owner;
33227 idev->info = info;
33228 init_waitqueue_head(&idev->wait);
33229- atomic_set(&idev->event, 0);
33230+ atomic_set_unchecked(&idev->event, 0);
33231
33232 ret = uio_get_minor(idev);
33233 if (ret)
33234diff -urNp linux-3.0.4/drivers/usb/atm/cxacru.c linux-3.0.4/drivers/usb/atm/cxacru.c
33235--- linux-3.0.4/drivers/usb/atm/cxacru.c 2011-07-21 22:17:23.000000000 -0400
33236+++ linux-3.0.4/drivers/usb/atm/cxacru.c 2011-08-23 21:47:56.000000000 -0400
33237@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_c
33238 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
33239 if (ret < 2)
33240 return -EINVAL;
33241- if (index < 0 || index > 0x7f)
33242+ if (index > 0x7f)
33243 return -EINVAL;
33244 pos += tmp;
33245
33246diff -urNp linux-3.0.4/drivers/usb/atm/usbatm.c linux-3.0.4/drivers/usb/atm/usbatm.c
33247--- linux-3.0.4/drivers/usb/atm/usbatm.c 2011-07-21 22:17:23.000000000 -0400
33248+++ linux-3.0.4/drivers/usb/atm/usbatm.c 2011-08-23 21:47:56.000000000 -0400
33249@@ -332,7 +332,7 @@ static void usbatm_extract_one_cell(stru
33250 if (printk_ratelimit())
33251 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
33252 __func__, vpi, vci);
33253- atomic_inc(&vcc->stats->rx_err);
33254+ atomic_inc_unchecked(&vcc->stats->rx_err);
33255 return;
33256 }
33257
33258@@ -360,7 +360,7 @@ static void usbatm_extract_one_cell(stru
33259 if (length > ATM_MAX_AAL5_PDU) {
33260 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
33261 __func__, length, vcc);
33262- atomic_inc(&vcc->stats->rx_err);
33263+ atomic_inc_unchecked(&vcc->stats->rx_err);
33264 goto out;
33265 }
33266
33267@@ -369,14 +369,14 @@ static void usbatm_extract_one_cell(stru
33268 if (sarb->len < pdu_length) {
33269 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
33270 __func__, pdu_length, sarb->len, vcc);
33271- atomic_inc(&vcc->stats->rx_err);
33272+ atomic_inc_unchecked(&vcc->stats->rx_err);
33273 goto out;
33274 }
33275
33276 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
33277 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
33278 __func__, vcc);
33279- atomic_inc(&vcc->stats->rx_err);
33280+ atomic_inc_unchecked(&vcc->stats->rx_err);
33281 goto out;
33282 }
33283
33284@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(stru
33285 if (printk_ratelimit())
33286 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
33287 __func__, length);
33288- atomic_inc(&vcc->stats->rx_drop);
33289+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33290 goto out;
33291 }
33292
33293@@ -411,7 +411,7 @@ static void usbatm_extract_one_cell(stru
33294
33295 vcc->push(vcc, skb);
33296
33297- atomic_inc(&vcc->stats->rx);
33298+ atomic_inc_unchecked(&vcc->stats->rx);
33299 out:
33300 skb_trim(sarb, 0);
33301 }
33302@@ -614,7 +614,7 @@ static void usbatm_tx_process(unsigned l
33303 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
33304
33305 usbatm_pop(vcc, skb);
33306- atomic_inc(&vcc->stats->tx);
33307+ atomic_inc_unchecked(&vcc->stats->tx);
33308
33309 skb = skb_dequeue(&instance->sndqueue);
33310 }
33311@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct a
33312 if (!left--)
33313 return sprintf(page,
33314 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
33315- atomic_read(&atm_dev->stats.aal5.tx),
33316- atomic_read(&atm_dev->stats.aal5.tx_err),
33317- atomic_read(&atm_dev->stats.aal5.rx),
33318- atomic_read(&atm_dev->stats.aal5.rx_err),
33319- atomic_read(&atm_dev->stats.aal5.rx_drop));
33320+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
33321+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
33322+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
33323+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
33324+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
33325
33326 if (!left--) {
33327 if (instance->disconnected)
33328diff -urNp linux-3.0.4/drivers/usb/core/devices.c linux-3.0.4/drivers/usb/core/devices.c
33329--- linux-3.0.4/drivers/usb/core/devices.c 2011-07-21 22:17:23.000000000 -0400
33330+++ linux-3.0.4/drivers/usb/core/devices.c 2011-08-23 21:47:56.000000000 -0400
33331@@ -126,7 +126,7 @@ static const char format_endpt[] =
33332 * time it gets called.
33333 */
33334 static struct device_connect_event {
33335- atomic_t count;
33336+ atomic_unchecked_t count;
33337 wait_queue_head_t wait;
33338 } device_event = {
33339 .count = ATOMIC_INIT(1),
33340@@ -164,7 +164,7 @@ static const struct class_info clas_info
33341
33342 void usbfs_conn_disc_event(void)
33343 {
33344- atomic_add(2, &device_event.count);
33345+ atomic_add_unchecked(2, &device_event.count);
33346 wake_up(&device_event.wait);
33347 }
33348
33349@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(stru
33350
33351 poll_wait(file, &device_event.wait, wait);
33352
33353- event_count = atomic_read(&device_event.count);
33354+ event_count = atomic_read_unchecked(&device_event.count);
33355 if (file->f_version != event_count) {
33356 file->f_version = event_count;
33357 return POLLIN | POLLRDNORM;
33358diff -urNp linux-3.0.4/drivers/usb/core/message.c linux-3.0.4/drivers/usb/core/message.c
33359--- linux-3.0.4/drivers/usb/core/message.c 2011-07-21 22:17:23.000000000 -0400
33360+++ linux-3.0.4/drivers/usb/core/message.c 2011-08-23 21:47:56.000000000 -0400
33361@@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device
33362 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
33363 if (buf) {
33364 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
33365- if (len > 0) {
33366- smallbuf = kmalloc(++len, GFP_NOIO);
33367+ if (len++ > 0) {
33368+ smallbuf = kmalloc(len, GFP_NOIO);
33369 if (!smallbuf)
33370 return buf;
33371 memcpy(smallbuf, buf, len);
33372diff -urNp linux-3.0.4/drivers/usb/early/ehci-dbgp.c linux-3.0.4/drivers/usb/early/ehci-dbgp.c
33373--- linux-3.0.4/drivers/usb/early/ehci-dbgp.c 2011-07-21 22:17:23.000000000 -0400
33374+++ linux-3.0.4/drivers/usb/early/ehci-dbgp.c 2011-08-23 21:47:56.000000000 -0400
33375@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x,
33376
33377 #ifdef CONFIG_KGDB
33378 static struct kgdb_io kgdbdbgp_io_ops;
33379-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
33380+static struct kgdb_io kgdbdbgp_io_ops_console;
33381+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
33382 #else
33383 #define dbgp_kgdb_mode (0)
33384 #endif
33385@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops =
33386 .write_char = kgdbdbgp_write_char,
33387 };
33388
33389+static struct kgdb_io kgdbdbgp_io_ops_console = {
33390+ .name = "kgdbdbgp",
33391+ .read_char = kgdbdbgp_read_char,
33392+ .write_char = kgdbdbgp_write_char,
33393+ .is_console = 1
33394+};
33395+
33396 static int kgdbdbgp_wait_time;
33397
33398 static int __init kgdbdbgp_parse_config(char *str)
33399@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(
33400 ptr++;
33401 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
33402 }
33403- kgdb_register_io_module(&kgdbdbgp_io_ops);
33404- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
33405+ if (early_dbgp_console.index != -1)
33406+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
33407+ else
33408+ kgdb_register_io_module(&kgdbdbgp_io_ops);
33409
33410 return 0;
33411 }
33412diff -urNp linux-3.0.4/drivers/usb/host/xhci-mem.c linux-3.0.4/drivers/usb/host/xhci-mem.c
33413--- linux-3.0.4/drivers/usb/host/xhci-mem.c 2011-07-21 22:17:23.000000000 -0400
33414+++ linux-3.0.4/drivers/usb/host/xhci-mem.c 2011-08-23 21:48:14.000000000 -0400
33415@@ -1685,6 +1685,8 @@ static int xhci_check_trb_in_td_math(str
33416 unsigned int num_tests;
33417 int i, ret;
33418
33419+ pax_track_stack();
33420+
33421 num_tests = ARRAY_SIZE(simple_test_vector);
33422 for (i = 0; i < num_tests; i++) {
33423 ret = xhci_test_trb_in_td(xhci,
33424diff -urNp linux-3.0.4/drivers/usb/wusbcore/wa-hc.h linux-3.0.4/drivers/usb/wusbcore/wa-hc.h
33425--- linux-3.0.4/drivers/usb/wusbcore/wa-hc.h 2011-07-21 22:17:23.000000000 -0400
33426+++ linux-3.0.4/drivers/usb/wusbcore/wa-hc.h 2011-08-23 21:47:56.000000000 -0400
33427@@ -192,7 +192,7 @@ struct wahc {
33428 struct list_head xfer_delayed_list;
33429 spinlock_t xfer_list_lock;
33430 struct work_struct xfer_work;
33431- atomic_t xfer_id_count;
33432+ atomic_unchecked_t xfer_id_count;
33433 };
33434
33435
33436@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
33437 INIT_LIST_HEAD(&wa->xfer_delayed_list);
33438 spin_lock_init(&wa->xfer_list_lock);
33439 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
33440- atomic_set(&wa->xfer_id_count, 1);
33441+ atomic_set_unchecked(&wa->xfer_id_count, 1);
33442 }
33443
33444 /**
33445diff -urNp linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c
33446--- linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c 2011-07-21 22:17:23.000000000 -0400
33447+++ linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c 2011-08-23 21:47:56.000000000 -0400
33448@@ -294,7 +294,7 @@ out:
33449 */
33450 static void wa_xfer_id_init(struct wa_xfer *xfer)
33451 {
33452- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
33453+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
33454 }
33455
33456 /*
33457diff -urNp linux-3.0.4/drivers/vhost/vhost.c linux-3.0.4/drivers/vhost/vhost.c
33458--- linux-3.0.4/drivers/vhost/vhost.c 2011-07-21 22:17:23.000000000 -0400
33459+++ linux-3.0.4/drivers/vhost/vhost.c 2011-08-23 21:47:56.000000000 -0400
33460@@ -589,7 +589,7 @@ static int init_used(struct vhost_virtqu
33461 return get_user(vq->last_used_idx, &used->idx);
33462 }
33463
33464-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
33465+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
33466 {
33467 struct file *eventfp, *filep = NULL,
33468 *pollstart = NULL, *pollstop = NULL;
33469diff -urNp linux-3.0.4/drivers/video/fbcmap.c linux-3.0.4/drivers/video/fbcmap.c
33470--- linux-3.0.4/drivers/video/fbcmap.c 2011-07-21 22:17:23.000000000 -0400
33471+++ linux-3.0.4/drivers/video/fbcmap.c 2011-08-23 21:47:56.000000000 -0400
33472@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user
33473 rc = -ENODEV;
33474 goto out;
33475 }
33476- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
33477- !info->fbops->fb_setcmap)) {
33478+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
33479 rc = -EINVAL;
33480 goto out1;
33481 }
33482diff -urNp linux-3.0.4/drivers/video/fbmem.c linux-3.0.4/drivers/video/fbmem.c
33483--- linux-3.0.4/drivers/video/fbmem.c 2011-07-21 22:17:23.000000000 -0400
33484+++ linux-3.0.4/drivers/video/fbmem.c 2011-08-23 21:48:14.000000000 -0400
33485@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_in
33486 image->dx += image->width + 8;
33487 }
33488 } else if (rotate == FB_ROTATE_UD) {
33489- for (x = 0; x < num && image->dx >= 0; x++) {
33490+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
33491 info->fbops->fb_imageblit(info, image);
33492 image->dx -= image->width + 8;
33493 }
33494@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_in
33495 image->dy += image->height + 8;
33496 }
33497 } else if (rotate == FB_ROTATE_CCW) {
33498- for (x = 0; x < num && image->dy >= 0; x++) {
33499+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
33500 info->fbops->fb_imageblit(info, image);
33501 image->dy -= image->height + 8;
33502 }
33503@@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct
33504 int flags = info->flags;
33505 int ret = 0;
33506
33507+ pax_track_stack();
33508+
33509 if (var->activate & FB_ACTIVATE_INV_MODE) {
33510 struct fb_videomode mode1, mode2;
33511
33512@@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *
33513 void __user *argp = (void __user *)arg;
33514 long ret = 0;
33515
33516+ pax_track_stack();
33517+
33518 switch (cmd) {
33519 case FBIOGET_VSCREENINFO:
33520 if (!lock_fb_info(info))
33521@@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *
33522 return -EFAULT;
33523 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
33524 return -EINVAL;
33525- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
33526+ if (con2fb.framebuffer >= FB_MAX)
33527 return -EINVAL;
33528 if (!registered_fb[con2fb.framebuffer])
33529 request_module("fb%d", con2fb.framebuffer);
33530diff -urNp linux-3.0.4/drivers/video/i810/i810_accel.c linux-3.0.4/drivers/video/i810/i810_accel.c
33531--- linux-3.0.4/drivers/video/i810/i810_accel.c 2011-07-21 22:17:23.000000000 -0400
33532+++ linux-3.0.4/drivers/video/i810/i810_accel.c 2011-08-23 21:47:56.000000000 -0400
33533@@ -73,6 +73,7 @@ static inline int wait_for_space(struct
33534 }
33535 }
33536 printk("ringbuffer lockup!!!\n");
33537+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
33538 i810_report_error(mmio);
33539 par->dev_flags |= LOCKUP;
33540 info->pixmap.scan_align = 1;
33541diff -urNp linux-3.0.4/drivers/video/logo/logo_linux_clut224.ppm linux-3.0.4/drivers/video/logo/logo_linux_clut224.ppm
33542--- linux-3.0.4/drivers/video/logo/logo_linux_clut224.ppm 2011-07-21 22:17:23.000000000 -0400
33543+++ linux-3.0.4/drivers/video/logo/logo_linux_clut224.ppm 2011-08-29 23:49:40.000000000 -0400
33544@@ -1,1604 +1,1123 @@
33545 P3
33546-# Standard 224-color Linux logo
33547 80 80
33548 255
33549- 0 0 0 0 0 0 0 0 0 0 0 0
33550- 0 0 0 0 0 0 0 0 0 0 0 0
33551- 0 0 0 0 0 0 0 0 0 0 0 0
33552- 0 0 0 0 0 0 0 0 0 0 0 0
33553- 0 0 0 0 0 0 0 0 0 0 0 0
33554- 0 0 0 0 0 0 0 0 0 0 0 0
33555- 0 0 0 0 0 0 0 0 0 0 0 0
33556- 0 0 0 0 0 0 0 0 0 0 0 0
33557- 0 0 0 0 0 0 0 0 0 0 0 0
33558- 6 6 6 6 6 6 10 10 10 10 10 10
33559- 10 10 10 6 6 6 6 6 6 6 6 6
33560- 0 0 0 0 0 0 0 0 0 0 0 0
33561- 0 0 0 0 0 0 0 0 0 0 0 0
33562- 0 0 0 0 0 0 0 0 0 0 0 0
33563- 0 0 0 0 0 0 0 0 0 0 0 0
33564- 0 0 0 0 0 0 0 0 0 0 0 0
33565- 0 0 0 0 0 0 0 0 0 0 0 0
33566- 0 0 0 0 0 0 0 0 0 0 0 0
33567- 0 0 0 0 0 0 0 0 0 0 0 0
33568- 0 0 0 0 0 0 0 0 0 0 0 0
33569- 0 0 0 0 0 0 0 0 0 0 0 0
33570- 0 0 0 0 0 0 0 0 0 0 0 0
33571- 0 0 0 0 0 0 0 0 0 0 0 0
33572- 0 0 0 0 0 0 0 0 0 0 0 0
33573- 0 0 0 0 0 0 0 0 0 0 0 0
33574- 0 0 0 0 0 0 0 0 0 0 0 0
33575- 0 0 0 0 0 0 0 0 0 0 0 0
33576- 0 0 0 0 0 0 0 0 0 0 0 0
33577- 0 0 0 6 6 6 10 10 10 14 14 14
33578- 22 22 22 26 26 26 30 30 30 34 34 34
33579- 30 30 30 30 30 30 26 26 26 18 18 18
33580- 14 14 14 10 10 10 6 6 6 0 0 0
33581- 0 0 0 0 0 0 0 0 0 0 0 0
33582- 0 0 0 0 0 0 0 0 0 0 0 0
33583- 0 0 0 0 0 0 0 0 0 0 0 0
33584- 0 0 0 0 0 0 0 0 0 0 0 0
33585- 0 0 0 0 0 0 0 0 0 0 0 0
33586- 0 0 0 0 0 0 0 0 0 0 0 0
33587- 0 0 0 0 0 0 0 0 0 0 0 0
33588- 0 0 0 0 0 0 0 0 0 0 0 0
33589- 0 0 0 0 0 0 0 0 0 0 0 0
33590- 0 0 0 0 0 1 0 0 1 0 0 0
33591- 0 0 0 0 0 0 0 0 0 0 0 0
33592- 0 0 0 0 0 0 0 0 0 0 0 0
33593- 0 0 0 0 0 0 0 0 0 0 0 0
33594- 0 0 0 0 0 0 0 0 0 0 0 0
33595- 0 0 0 0 0 0 0 0 0 0 0 0
33596- 0 0 0 0 0 0 0 0 0 0 0 0
33597- 6 6 6 14 14 14 26 26 26 42 42 42
33598- 54 54 54 66 66 66 78 78 78 78 78 78
33599- 78 78 78 74 74 74 66 66 66 54 54 54
33600- 42 42 42 26 26 26 18 18 18 10 10 10
33601- 6 6 6 0 0 0 0 0 0 0 0 0
33602- 0 0 0 0 0 0 0 0 0 0 0 0
33603- 0 0 0 0 0 0 0 0 0 0 0 0
33604- 0 0 0 0 0 0 0 0 0 0 0 0
33605- 0 0 0 0 0 0 0 0 0 0 0 0
33606- 0 0 0 0 0 0 0 0 0 0 0 0
33607- 0 0 0 0 0 0 0 0 0 0 0 0
33608- 0 0 0 0 0 0 0 0 0 0 0 0
33609- 0 0 0 0 0 0 0 0 0 0 0 0
33610- 0 0 1 0 0 0 0 0 0 0 0 0
33611- 0 0 0 0 0 0 0 0 0 0 0 0
33612- 0 0 0 0 0 0 0 0 0 0 0 0
33613- 0 0 0 0 0 0 0 0 0 0 0 0
33614- 0 0 0 0 0 0 0 0 0 0 0 0
33615- 0 0 0 0 0 0 0 0 0 0 0 0
33616- 0 0 0 0 0 0 0 0 0 10 10 10
33617- 22 22 22 42 42 42 66 66 66 86 86 86
33618- 66 66 66 38 38 38 38 38 38 22 22 22
33619- 26 26 26 34 34 34 54 54 54 66 66 66
33620- 86 86 86 70 70 70 46 46 46 26 26 26
33621- 14 14 14 6 6 6 0 0 0 0 0 0
33622- 0 0 0 0 0 0 0 0 0 0 0 0
33623- 0 0 0 0 0 0 0 0 0 0 0 0
33624- 0 0 0 0 0 0 0 0 0 0 0 0
33625- 0 0 0 0 0 0 0 0 0 0 0 0
33626- 0 0 0 0 0 0 0 0 0 0 0 0
33627- 0 0 0 0 0 0 0 0 0 0 0 0
33628- 0 0 0 0 0 0 0 0 0 0 0 0
33629- 0 0 0 0 0 0 0 0 0 0 0 0
33630- 0 0 1 0 0 1 0 0 1 0 0 0
33631- 0 0 0 0 0 0 0 0 0 0 0 0
33632- 0 0 0 0 0 0 0 0 0 0 0 0
33633- 0 0 0 0 0 0 0 0 0 0 0 0
33634- 0 0 0 0 0 0 0 0 0 0 0 0
33635- 0 0 0 0 0 0 0 0 0 0 0 0
33636- 0 0 0 0 0 0 10 10 10 26 26 26
33637- 50 50 50 82 82 82 58 58 58 6 6 6
33638- 2 2 6 2 2 6 2 2 6 2 2 6
33639- 2 2 6 2 2 6 2 2 6 2 2 6
33640- 6 6 6 54 54 54 86 86 86 66 66 66
33641- 38 38 38 18 18 18 6 6 6 0 0 0
33642- 0 0 0 0 0 0 0 0 0 0 0 0
33643- 0 0 0 0 0 0 0 0 0 0 0 0
33644- 0 0 0 0 0 0 0 0 0 0 0 0
33645- 0 0 0 0 0 0 0 0 0 0 0 0
33646- 0 0 0 0 0 0 0 0 0 0 0 0
33647- 0 0 0 0 0 0 0 0 0 0 0 0
33648- 0 0 0 0 0 0 0 0 0 0 0 0
33649- 0 0 0 0 0 0 0 0 0 0 0 0
33650- 0 0 0 0 0 0 0 0 0 0 0 0
33651- 0 0 0 0 0 0 0 0 0 0 0 0
33652- 0 0 0 0 0 0 0 0 0 0 0 0
33653- 0 0 0 0 0 0 0 0 0 0 0 0
33654- 0 0 0 0 0 0 0 0 0 0 0 0
33655- 0 0 0 0 0 0 0 0 0 0 0 0
33656- 0 0 0 6 6 6 22 22 22 50 50 50
33657- 78 78 78 34 34 34 2 2 6 2 2 6
33658- 2 2 6 2 2 6 2 2 6 2 2 6
33659- 2 2 6 2 2 6 2 2 6 2 2 6
33660- 2 2 6 2 2 6 6 6 6 70 70 70
33661- 78 78 78 46 46 46 22 22 22 6 6 6
33662- 0 0 0 0 0 0 0 0 0 0 0 0
33663- 0 0 0 0 0 0 0 0 0 0 0 0
33664- 0 0 0 0 0 0 0 0 0 0 0 0
33665- 0 0 0 0 0 0 0 0 0 0 0 0
33666- 0 0 0 0 0 0 0 0 0 0 0 0
33667- 0 0 0 0 0 0 0 0 0 0 0 0
33668- 0 0 0 0 0 0 0 0 0 0 0 0
33669- 0 0 0 0 0 0 0 0 0 0 0 0
33670- 0 0 1 0 0 1 0 0 1 0 0 0
33671- 0 0 0 0 0 0 0 0 0 0 0 0
33672- 0 0 0 0 0 0 0 0 0 0 0 0
33673- 0 0 0 0 0 0 0 0 0 0 0 0
33674- 0 0 0 0 0 0 0 0 0 0 0 0
33675- 0 0 0 0 0 0 0 0 0 0 0 0
33676- 6 6 6 18 18 18 42 42 42 82 82 82
33677- 26 26 26 2 2 6 2 2 6 2 2 6
33678- 2 2 6 2 2 6 2 2 6 2 2 6
33679- 2 2 6 2 2 6 2 2 6 14 14 14
33680- 46 46 46 34 34 34 6 6 6 2 2 6
33681- 42 42 42 78 78 78 42 42 42 18 18 18
33682- 6 6 6 0 0 0 0 0 0 0 0 0
33683- 0 0 0 0 0 0 0 0 0 0 0 0
33684- 0 0 0 0 0 0 0 0 0 0 0 0
33685- 0 0 0 0 0 0 0 0 0 0 0 0
33686- 0 0 0 0 0 0 0 0 0 0 0 0
33687- 0 0 0 0 0 0 0 0 0 0 0 0
33688- 0 0 0 0 0 0 0 0 0 0 0 0
33689- 0 0 0 0 0 0 0 0 0 0 0 0
33690- 0 0 1 0 0 0 0 0 1 0 0 0
33691- 0 0 0 0 0 0 0 0 0 0 0 0
33692- 0 0 0 0 0 0 0 0 0 0 0 0
33693- 0 0 0 0 0 0 0 0 0 0 0 0
33694- 0 0 0 0 0 0 0 0 0 0 0 0
33695- 0 0 0 0 0 0 0 0 0 0 0 0
33696- 10 10 10 30 30 30 66 66 66 58 58 58
33697- 2 2 6 2 2 6 2 2 6 2 2 6
33698- 2 2 6 2 2 6 2 2 6 2 2 6
33699- 2 2 6 2 2 6 2 2 6 26 26 26
33700- 86 86 86 101 101 101 46 46 46 10 10 10
33701- 2 2 6 58 58 58 70 70 70 34 34 34
33702- 10 10 10 0 0 0 0 0 0 0 0 0
33703- 0 0 0 0 0 0 0 0 0 0 0 0
33704- 0 0 0 0 0 0 0 0 0 0 0 0
33705- 0 0 0 0 0 0 0 0 0 0 0 0
33706- 0 0 0 0 0 0 0 0 0 0 0 0
33707- 0 0 0 0 0 0 0 0 0 0 0 0
33708- 0 0 0 0 0 0 0 0 0 0 0 0
33709- 0 0 0 0 0 0 0 0 0 0 0 0
33710- 0 0 1 0 0 1 0 0 1 0 0 0
33711- 0 0 0 0 0 0 0 0 0 0 0 0
33712- 0 0 0 0 0 0 0 0 0 0 0 0
33713- 0 0 0 0 0 0 0 0 0 0 0 0
33714- 0 0 0 0 0 0 0 0 0 0 0 0
33715- 0 0 0 0 0 0 0 0 0 0 0 0
33716- 14 14 14 42 42 42 86 86 86 10 10 10
33717- 2 2 6 2 2 6 2 2 6 2 2 6
33718- 2 2 6 2 2 6 2 2 6 2 2 6
33719- 2 2 6 2 2 6 2 2 6 30 30 30
33720- 94 94 94 94 94 94 58 58 58 26 26 26
33721- 2 2 6 6 6 6 78 78 78 54 54 54
33722- 22 22 22 6 6 6 0 0 0 0 0 0
33723- 0 0 0 0 0 0 0 0 0 0 0 0
33724- 0 0 0 0 0 0 0 0 0 0 0 0
33725- 0 0 0 0 0 0 0 0 0 0 0 0
33726- 0 0 0 0 0 0 0 0 0 0 0 0
33727- 0 0 0 0 0 0 0 0 0 0 0 0
33728- 0 0 0 0 0 0 0 0 0 0 0 0
33729- 0 0 0 0 0 0 0 0 0 0 0 0
33730- 0 0 0 0 0 0 0 0 0 0 0 0
33731- 0 0 0 0 0 0 0 0 0 0 0 0
33732- 0 0 0 0 0 0 0 0 0 0 0 0
33733- 0 0 0 0 0 0 0 0 0 0 0 0
33734- 0 0 0 0 0 0 0 0 0 0 0 0
33735- 0 0 0 0 0 0 0 0 0 6 6 6
33736- 22 22 22 62 62 62 62 62 62 2 2 6
33737- 2 2 6 2 2 6 2 2 6 2 2 6
33738- 2 2 6 2 2 6 2 2 6 2 2 6
33739- 2 2 6 2 2 6 2 2 6 26 26 26
33740- 54 54 54 38 38 38 18 18 18 10 10 10
33741- 2 2 6 2 2 6 34 34 34 82 82 82
33742- 38 38 38 14 14 14 0 0 0 0 0 0
33743- 0 0 0 0 0 0 0 0 0 0 0 0
33744- 0 0 0 0 0 0 0 0 0 0 0 0
33745- 0 0 0 0 0 0 0 0 0 0 0 0
33746- 0 0 0 0 0 0 0 0 0 0 0 0
33747- 0 0 0 0 0 0 0 0 0 0 0 0
33748- 0 0 0 0 0 0 0 0 0 0 0 0
33749- 0 0 0 0 0 0 0 0 0 0 0 0
33750- 0 0 0 0 0 1 0 0 1 0 0 0
33751- 0 0 0 0 0 0 0 0 0 0 0 0
33752- 0 0 0 0 0 0 0 0 0 0 0 0
33753- 0 0 0 0 0 0 0 0 0 0 0 0
33754- 0 0 0 0 0 0 0 0 0 0 0 0
33755- 0 0 0 0 0 0 0 0 0 6 6 6
33756- 30 30 30 78 78 78 30 30 30 2 2 6
33757- 2 2 6 2 2 6 2 2 6 2 2 6
33758- 2 2 6 2 2 6 2 2 6 2 2 6
33759- 2 2 6 2 2 6 2 2 6 10 10 10
33760- 10 10 10 2 2 6 2 2 6 2 2 6
33761- 2 2 6 2 2 6 2 2 6 78 78 78
33762- 50 50 50 18 18 18 6 6 6 0 0 0
33763- 0 0 0 0 0 0 0 0 0 0 0 0
33764- 0 0 0 0 0 0 0 0 0 0 0 0
33765- 0 0 0 0 0 0 0 0 0 0 0 0
33766- 0 0 0 0 0 0 0 0 0 0 0 0
33767- 0 0 0 0 0 0 0 0 0 0 0 0
33768- 0 0 0 0 0 0 0 0 0 0 0 0
33769- 0 0 0 0 0 0 0 0 0 0 0 0
33770- 0 0 1 0 0 0 0 0 0 0 0 0
33771- 0 0 0 0 0 0 0 0 0 0 0 0
33772- 0 0 0 0 0 0 0 0 0 0 0 0
33773- 0 0 0 0 0 0 0 0 0 0 0 0
33774- 0 0 0 0 0 0 0 0 0 0 0 0
33775- 0 0 0 0 0 0 0 0 0 10 10 10
33776- 38 38 38 86 86 86 14 14 14 2 2 6
33777- 2 2 6 2 2 6 2 2 6 2 2 6
33778- 2 2 6 2 2 6 2 2 6 2 2 6
33779- 2 2 6 2 2 6 2 2 6 2 2 6
33780- 2 2 6 2 2 6 2 2 6 2 2 6
33781- 2 2 6 2 2 6 2 2 6 54 54 54
33782- 66 66 66 26 26 26 6 6 6 0 0 0
33783- 0 0 0 0 0 0 0 0 0 0 0 0
33784- 0 0 0 0 0 0 0 0 0 0 0 0
33785- 0 0 0 0 0 0 0 0 0 0 0 0
33786- 0 0 0 0 0 0 0 0 0 0 0 0
33787- 0 0 0 0 0 0 0 0 0 0 0 0
33788- 0 0 0 0 0 0 0 0 0 0 0 0
33789- 0 0 0 0 0 0 0 0 0 0 0 0
33790- 0 0 0 0 0 1 0 0 1 0 0 0
33791- 0 0 0 0 0 0 0 0 0 0 0 0
33792- 0 0 0 0 0 0 0 0 0 0 0 0
33793- 0 0 0 0 0 0 0 0 0 0 0 0
33794- 0 0 0 0 0 0 0 0 0 0 0 0
33795- 0 0 0 0 0 0 0 0 0 14 14 14
33796- 42 42 42 82 82 82 2 2 6 2 2 6
33797- 2 2 6 6 6 6 10 10 10 2 2 6
33798- 2 2 6 2 2 6 2 2 6 2 2 6
33799- 2 2 6 2 2 6 2 2 6 6 6 6
33800- 14 14 14 10 10 10 2 2 6 2 2 6
33801- 2 2 6 2 2 6 2 2 6 18 18 18
33802- 82 82 82 34 34 34 10 10 10 0 0 0
33803- 0 0 0 0 0 0 0 0 0 0 0 0
33804- 0 0 0 0 0 0 0 0 0 0 0 0
33805- 0 0 0 0 0 0 0 0 0 0 0 0
33806- 0 0 0 0 0 0 0 0 0 0 0 0
33807- 0 0 0 0 0 0 0 0 0 0 0 0
33808- 0 0 0 0 0 0 0 0 0 0 0 0
33809- 0 0 0 0 0 0 0 0 0 0 0 0
33810- 0 0 1 0 0 0 0 0 0 0 0 0
33811- 0 0 0 0 0 0 0 0 0 0 0 0
33812- 0 0 0 0 0 0 0 0 0 0 0 0
33813- 0 0 0 0 0 0 0 0 0 0 0 0
33814- 0 0 0 0 0 0 0 0 0 0 0 0
33815- 0 0 0 0 0 0 0 0 0 14 14 14
33816- 46 46 46 86 86 86 2 2 6 2 2 6
33817- 6 6 6 6 6 6 22 22 22 34 34 34
33818- 6 6 6 2 2 6 2 2 6 2 2 6
33819- 2 2 6 2 2 6 18 18 18 34 34 34
33820- 10 10 10 50 50 50 22 22 22 2 2 6
33821- 2 2 6 2 2 6 2 2 6 10 10 10
33822- 86 86 86 42 42 42 14 14 14 0 0 0
33823- 0 0 0 0 0 0 0 0 0 0 0 0
33824- 0 0 0 0 0 0 0 0 0 0 0 0
33825- 0 0 0 0 0 0 0 0 0 0 0 0
33826- 0 0 0 0 0 0 0 0 0 0 0 0
33827- 0 0 0 0 0 0 0 0 0 0 0 0
33828- 0 0 0 0 0 0 0 0 0 0 0 0
33829- 0 0 0 0 0 0 0 0 0 0 0 0
33830- 0 0 1 0 0 1 0 0 1 0 0 0
33831- 0 0 0 0 0 0 0 0 0 0 0 0
33832- 0 0 0 0 0 0 0 0 0 0 0 0
33833- 0 0 0 0 0 0 0 0 0 0 0 0
33834- 0 0 0 0 0 0 0 0 0 0 0 0
33835- 0 0 0 0 0 0 0 0 0 14 14 14
33836- 46 46 46 86 86 86 2 2 6 2 2 6
33837- 38 38 38 116 116 116 94 94 94 22 22 22
33838- 22 22 22 2 2 6 2 2 6 2 2 6
33839- 14 14 14 86 86 86 138 138 138 162 162 162
33840-154 154 154 38 38 38 26 26 26 6 6 6
33841- 2 2 6 2 2 6 2 2 6 2 2 6
33842- 86 86 86 46 46 46 14 14 14 0 0 0
33843- 0 0 0 0 0 0 0 0 0 0 0 0
33844- 0 0 0 0 0 0 0 0 0 0 0 0
33845- 0 0 0 0 0 0 0 0 0 0 0 0
33846- 0 0 0 0 0 0 0 0 0 0 0 0
33847- 0 0 0 0 0 0 0 0 0 0 0 0
33848- 0 0 0 0 0 0 0 0 0 0 0 0
33849- 0 0 0 0 0 0 0 0 0 0 0 0
33850- 0 0 0 0 0 0 0 0 0 0 0 0
33851- 0 0 0 0 0 0 0 0 0 0 0 0
33852- 0 0 0 0 0 0 0 0 0 0 0 0
33853- 0 0 0 0 0 0 0 0 0 0 0 0
33854- 0 0 0 0 0 0 0 0 0 0 0 0
33855- 0 0 0 0 0 0 0 0 0 14 14 14
33856- 46 46 46 86 86 86 2 2 6 14 14 14
33857-134 134 134 198 198 198 195 195 195 116 116 116
33858- 10 10 10 2 2 6 2 2 6 6 6 6
33859-101 98 89 187 187 187 210 210 210 218 218 218
33860-214 214 214 134 134 134 14 14 14 6 6 6
33861- 2 2 6 2 2 6 2 2 6 2 2 6
33862- 86 86 86 50 50 50 18 18 18 6 6 6
33863- 0 0 0 0 0 0 0 0 0 0 0 0
33864- 0 0 0 0 0 0 0 0 0 0 0 0
33865- 0 0 0 0 0 0 0 0 0 0 0 0
33866- 0 0 0 0 0 0 0 0 0 0 0 0
33867- 0 0 0 0 0 0 0 0 0 0 0 0
33868- 0 0 0 0 0 0 0 0 0 0 0 0
33869- 0 0 0 0 0 0 0 0 1 0 0 0
33870- 0 0 1 0 0 1 0 0 1 0 0 0
33871- 0 0 0 0 0 0 0 0 0 0 0 0
33872- 0 0 0 0 0 0 0 0 0 0 0 0
33873- 0 0 0 0 0 0 0 0 0 0 0 0
33874- 0 0 0 0 0 0 0 0 0 0 0 0
33875- 0 0 0 0 0 0 0 0 0 14 14 14
33876- 46 46 46 86 86 86 2 2 6 54 54 54
33877-218 218 218 195 195 195 226 226 226 246 246 246
33878- 58 58 58 2 2 6 2 2 6 30 30 30
33879-210 210 210 253 253 253 174 174 174 123 123 123
33880-221 221 221 234 234 234 74 74 74 2 2 6
33881- 2 2 6 2 2 6 2 2 6 2 2 6
33882- 70 70 70 58 58 58 22 22 22 6 6 6
33883- 0 0 0 0 0 0 0 0 0 0 0 0
33884- 0 0 0 0 0 0 0 0 0 0 0 0
33885- 0 0 0 0 0 0 0 0 0 0 0 0
33886- 0 0 0 0 0 0 0 0 0 0 0 0
33887- 0 0 0 0 0 0 0 0 0 0 0 0
33888- 0 0 0 0 0 0 0 0 0 0 0 0
33889- 0 0 0 0 0 0 0 0 0 0 0 0
33890- 0 0 0 0 0 0 0 0 0 0 0 0
33891- 0 0 0 0 0 0 0 0 0 0 0 0
33892- 0 0 0 0 0 0 0 0 0 0 0 0
33893- 0 0 0 0 0 0 0 0 0 0 0 0
33894- 0 0 0 0 0 0 0 0 0 0 0 0
33895- 0 0 0 0 0 0 0 0 0 14 14 14
33896- 46 46 46 82 82 82 2 2 6 106 106 106
33897-170 170 170 26 26 26 86 86 86 226 226 226
33898-123 123 123 10 10 10 14 14 14 46 46 46
33899-231 231 231 190 190 190 6 6 6 70 70 70
33900- 90 90 90 238 238 238 158 158 158 2 2 6
33901- 2 2 6 2 2 6 2 2 6 2 2 6
33902- 70 70 70 58 58 58 22 22 22 6 6 6
33903- 0 0 0 0 0 0 0 0 0 0 0 0
33904- 0 0 0 0 0 0 0 0 0 0 0 0
33905- 0 0 0 0 0 0 0 0 0 0 0 0
33906- 0 0 0 0 0 0 0 0 0 0 0 0
33907- 0 0 0 0 0 0 0 0 0 0 0 0
33908- 0 0 0 0 0 0 0 0 0 0 0 0
33909- 0 0 0 0 0 0 0 0 1 0 0 0
33910- 0 0 1 0 0 1 0 0 1 0 0 0
33911- 0 0 0 0 0 0 0 0 0 0 0 0
33912- 0 0 0 0 0 0 0 0 0 0 0 0
33913- 0 0 0 0 0 0 0 0 0 0 0 0
33914- 0 0 0 0 0 0 0 0 0 0 0 0
33915- 0 0 0 0 0 0 0 0 0 14 14 14
33916- 42 42 42 86 86 86 6 6 6 116 116 116
33917-106 106 106 6 6 6 70 70 70 149 149 149
33918-128 128 128 18 18 18 38 38 38 54 54 54
33919-221 221 221 106 106 106 2 2 6 14 14 14
33920- 46 46 46 190 190 190 198 198 198 2 2 6
33921- 2 2 6 2 2 6 2 2 6 2 2 6
33922- 74 74 74 62 62 62 22 22 22 6 6 6
33923- 0 0 0 0 0 0 0 0 0 0 0 0
33924- 0 0 0 0 0 0 0 0 0 0 0 0
33925- 0 0 0 0 0 0 0 0 0 0 0 0
33926- 0 0 0 0 0 0 0 0 0 0 0 0
33927- 0 0 0 0 0 0 0 0 0 0 0 0
33928- 0 0 0 0 0 0 0 0 0 0 0 0
33929- 0 0 0 0 0 0 0 0 1 0 0 0
33930- 0 0 1 0 0 0 0 0 1 0 0 0
33931- 0 0 0 0 0 0 0 0 0 0 0 0
33932- 0 0 0 0 0 0 0 0 0 0 0 0
33933- 0 0 0 0 0 0 0 0 0 0 0 0
33934- 0 0 0 0 0 0 0 0 0 0 0 0
33935- 0 0 0 0 0 0 0 0 0 14 14 14
33936- 42 42 42 94 94 94 14 14 14 101 101 101
33937-128 128 128 2 2 6 18 18 18 116 116 116
33938-118 98 46 121 92 8 121 92 8 98 78 10
33939-162 162 162 106 106 106 2 2 6 2 2 6
33940- 2 2 6 195 195 195 195 195 195 6 6 6
33941- 2 2 6 2 2 6 2 2 6 2 2 6
33942- 74 74 74 62 62 62 22 22 22 6 6 6
33943- 0 0 0 0 0 0 0 0 0 0 0 0
33944- 0 0 0 0 0 0 0 0 0 0 0 0
33945- 0 0 0 0 0 0 0 0 0 0 0 0
33946- 0 0 0 0 0 0 0 0 0 0 0 0
33947- 0 0 0 0 0 0 0 0 0 0 0 0
33948- 0 0 0 0 0 0 0 0 0 0 0 0
33949- 0 0 0 0 0 0 0 0 1 0 0 1
33950- 0 0 1 0 0 0 0 0 1 0 0 0
33951- 0 0 0 0 0 0 0 0 0 0 0 0
33952- 0 0 0 0 0 0 0 0 0 0 0 0
33953- 0 0 0 0 0 0 0 0 0 0 0 0
33954- 0 0 0 0 0 0 0 0 0 0 0 0
33955- 0 0 0 0 0 0 0 0 0 10 10 10
33956- 38 38 38 90 90 90 14 14 14 58 58 58
33957-210 210 210 26 26 26 54 38 6 154 114 10
33958-226 170 11 236 186 11 225 175 15 184 144 12
33959-215 174 15 175 146 61 37 26 9 2 2 6
33960- 70 70 70 246 246 246 138 138 138 2 2 6
33961- 2 2 6 2 2 6 2 2 6 2 2 6
33962- 70 70 70 66 66 66 26 26 26 6 6 6
33963- 0 0 0 0 0 0 0 0 0 0 0 0
33964- 0 0 0 0 0 0 0 0 0 0 0 0
33965- 0 0 0 0 0 0 0 0 0 0 0 0
33966- 0 0 0 0 0 0 0 0 0 0 0 0
33967- 0 0 0 0 0 0 0 0 0 0 0 0
33968- 0 0 0 0 0 0 0 0 0 0 0 0
33969- 0 0 0 0 0 0 0 0 0 0 0 0
33970- 0 0 0 0 0 0 0 0 0 0 0 0
33971- 0 0 0 0 0 0 0 0 0 0 0 0
33972- 0 0 0 0 0 0 0 0 0 0 0 0
33973- 0 0 0 0 0 0 0 0 0 0 0 0
33974- 0 0 0 0 0 0 0 0 0 0 0 0
33975- 0 0 0 0 0 0 0 0 0 10 10 10
33976- 38 38 38 86 86 86 14 14 14 10 10 10
33977-195 195 195 188 164 115 192 133 9 225 175 15
33978-239 182 13 234 190 10 232 195 16 232 200 30
33979-245 207 45 241 208 19 232 195 16 184 144 12
33980-218 194 134 211 206 186 42 42 42 2 2 6
33981- 2 2 6 2 2 6 2 2 6 2 2 6
33982- 50 50 50 74 74 74 30 30 30 6 6 6
33983- 0 0 0 0 0 0 0 0 0 0 0 0
33984- 0 0 0 0 0 0 0 0 0 0 0 0
33985- 0 0 0 0 0 0 0 0 0 0 0 0
33986- 0 0 0 0 0 0 0 0 0 0 0 0
33987- 0 0 0 0 0 0 0 0 0 0 0 0
33988- 0 0 0 0 0 0 0 0 0 0 0 0
33989- 0 0 0 0 0 0 0 0 0 0 0 0
33990- 0 0 0 0 0 0 0 0 0 0 0 0
33991- 0 0 0 0 0 0 0 0 0 0 0 0
33992- 0 0 0 0 0 0 0 0 0 0 0 0
33993- 0 0 0 0 0 0 0 0 0 0 0 0
33994- 0 0 0 0 0 0 0 0 0 0 0 0
33995- 0 0 0 0 0 0 0 0 0 10 10 10
33996- 34 34 34 86 86 86 14 14 14 2 2 6
33997-121 87 25 192 133 9 219 162 10 239 182 13
33998-236 186 11 232 195 16 241 208 19 244 214 54
33999-246 218 60 246 218 38 246 215 20 241 208 19
34000-241 208 19 226 184 13 121 87 25 2 2 6
34001- 2 2 6 2 2 6 2 2 6 2 2 6
34002- 50 50 50 82 82 82 34 34 34 10 10 10
34003- 0 0 0 0 0 0 0 0 0 0 0 0
34004- 0 0 0 0 0 0 0 0 0 0 0 0
34005- 0 0 0 0 0 0 0 0 0 0 0 0
34006- 0 0 0 0 0 0 0 0 0 0 0 0
34007- 0 0 0 0 0 0 0 0 0 0 0 0
34008- 0 0 0 0 0 0 0 0 0 0 0 0
34009- 0 0 0 0 0 0 0 0 0 0 0 0
34010- 0 0 0 0 0 0 0 0 0 0 0 0
34011- 0 0 0 0 0 0 0 0 0 0 0 0
34012- 0 0 0 0 0 0 0 0 0 0 0 0
34013- 0 0 0 0 0 0 0 0 0 0 0 0
34014- 0 0 0 0 0 0 0 0 0 0 0 0
34015- 0 0 0 0 0 0 0 0 0 10 10 10
34016- 34 34 34 82 82 82 30 30 30 61 42 6
34017-180 123 7 206 145 10 230 174 11 239 182 13
34018-234 190 10 238 202 15 241 208 19 246 218 74
34019-246 218 38 246 215 20 246 215 20 246 215 20
34020-226 184 13 215 174 15 184 144 12 6 6 6
34021- 2 2 6 2 2 6 2 2 6 2 2 6
34022- 26 26 26 94 94 94 42 42 42 14 14 14
34023- 0 0 0 0 0 0 0 0 0 0 0 0
34024- 0 0 0 0 0 0 0 0 0 0 0 0
34025- 0 0 0 0 0 0 0 0 0 0 0 0
34026- 0 0 0 0 0 0 0 0 0 0 0 0
34027- 0 0 0 0 0 0 0 0 0 0 0 0
34028- 0 0 0 0 0 0 0 0 0 0 0 0
34029- 0 0 0 0 0 0 0 0 0 0 0 0
34030- 0 0 0 0 0 0 0 0 0 0 0 0
34031- 0 0 0 0 0 0 0 0 0 0 0 0
34032- 0 0 0 0 0 0 0 0 0 0 0 0
34033- 0 0 0 0 0 0 0 0 0 0 0 0
34034- 0 0 0 0 0 0 0 0 0 0 0 0
34035- 0 0 0 0 0 0 0 0 0 10 10 10
34036- 30 30 30 78 78 78 50 50 50 104 69 6
34037-192 133 9 216 158 10 236 178 12 236 186 11
34038-232 195 16 241 208 19 244 214 54 245 215 43
34039-246 215 20 246 215 20 241 208 19 198 155 10
34040-200 144 11 216 158 10 156 118 10 2 2 6
34041- 2 2 6 2 2 6 2 2 6 2 2 6
34042- 6 6 6 90 90 90 54 54 54 18 18 18
34043- 6 6 6 0 0 0 0 0 0 0 0 0
34044- 0 0 0 0 0 0 0 0 0 0 0 0
34045- 0 0 0 0 0 0 0 0 0 0 0 0
34046- 0 0 0 0 0 0 0 0 0 0 0 0
34047- 0 0 0 0 0 0 0 0 0 0 0 0
34048- 0 0 0 0 0 0 0 0 0 0 0 0
34049- 0 0 0 0 0 0 0 0 0 0 0 0
34050- 0 0 0 0 0 0 0 0 0 0 0 0
34051- 0 0 0 0 0 0 0 0 0 0 0 0
34052- 0 0 0 0 0 0 0 0 0 0 0 0
34053- 0 0 0 0 0 0 0 0 0 0 0 0
34054- 0 0 0 0 0 0 0 0 0 0 0 0
34055- 0 0 0 0 0 0 0 0 0 10 10 10
34056- 30 30 30 78 78 78 46 46 46 22 22 22
34057-137 92 6 210 162 10 239 182 13 238 190 10
34058-238 202 15 241 208 19 246 215 20 246 215 20
34059-241 208 19 203 166 17 185 133 11 210 150 10
34060-216 158 10 210 150 10 102 78 10 2 2 6
34061- 6 6 6 54 54 54 14 14 14 2 2 6
34062- 2 2 6 62 62 62 74 74 74 30 30 30
34063- 10 10 10 0 0 0 0 0 0 0 0 0
34064- 0 0 0 0 0 0 0 0 0 0 0 0
34065- 0 0 0 0 0 0 0 0 0 0 0 0
34066- 0 0 0 0 0 0 0 0 0 0 0 0
34067- 0 0 0 0 0 0 0 0 0 0 0 0
34068- 0 0 0 0 0 0 0 0 0 0 0 0
34069- 0 0 0 0 0 0 0 0 0 0 0 0
34070- 0 0 0 0 0 0 0 0 0 0 0 0
34071- 0 0 0 0 0 0 0 0 0 0 0 0
34072- 0 0 0 0 0 0 0 0 0 0 0 0
34073- 0 0 0 0 0 0 0 0 0 0 0 0
34074- 0 0 0 0 0 0 0 0 0 0 0 0
34075- 0 0 0 0 0 0 0 0 0 10 10 10
34076- 34 34 34 78 78 78 50 50 50 6 6 6
34077- 94 70 30 139 102 15 190 146 13 226 184 13
34078-232 200 30 232 195 16 215 174 15 190 146 13
34079-168 122 10 192 133 9 210 150 10 213 154 11
34080-202 150 34 182 157 106 101 98 89 2 2 6
34081- 2 2 6 78 78 78 116 116 116 58 58 58
34082- 2 2 6 22 22 22 90 90 90 46 46 46
34083- 18 18 18 6 6 6 0 0 0 0 0 0
34084- 0 0 0 0 0 0 0 0 0 0 0 0
34085- 0 0 0 0 0 0 0 0 0 0 0 0
34086- 0 0 0 0 0 0 0 0 0 0 0 0
34087- 0 0 0 0 0 0 0 0 0 0 0 0
34088- 0 0 0 0 0 0 0 0 0 0 0 0
34089- 0 0 0 0 0 0 0 0 0 0 0 0
34090- 0 0 0 0 0 0 0 0 0 0 0 0
34091- 0 0 0 0 0 0 0 0 0 0 0 0
34092- 0 0 0 0 0 0 0 0 0 0 0 0
34093- 0 0 0 0 0 0 0 0 0 0 0 0
34094- 0 0 0 0 0 0 0 0 0 0 0 0
34095- 0 0 0 0 0 0 0 0 0 10 10 10
34096- 38 38 38 86 86 86 50 50 50 6 6 6
34097-128 128 128 174 154 114 156 107 11 168 122 10
34098-198 155 10 184 144 12 197 138 11 200 144 11
34099-206 145 10 206 145 10 197 138 11 188 164 115
34100-195 195 195 198 198 198 174 174 174 14 14 14
34101- 2 2 6 22 22 22 116 116 116 116 116 116
34102- 22 22 22 2 2 6 74 74 74 70 70 70
34103- 30 30 30 10 10 10 0 0 0 0 0 0
34104- 0 0 0 0 0 0 0 0 0 0 0 0
34105- 0 0 0 0 0 0 0 0 0 0 0 0
34106- 0 0 0 0 0 0 0 0 0 0 0 0
34107- 0 0 0 0 0 0 0 0 0 0 0 0
34108- 0 0 0 0 0 0 0 0 0 0 0 0
34109- 0 0 0 0 0 0 0 0 0 0 0 0
34110- 0 0 0 0 0 0 0 0 0 0 0 0
34111- 0 0 0 0 0 0 0 0 0 0 0 0
34112- 0 0 0 0 0 0 0 0 0 0 0 0
34113- 0 0 0 0 0 0 0 0 0 0 0 0
34114- 0 0 0 0 0 0 0 0 0 0 0 0
34115- 0 0 0 0 0 0 6 6 6 18 18 18
34116- 50 50 50 101 101 101 26 26 26 10 10 10
34117-138 138 138 190 190 190 174 154 114 156 107 11
34118-197 138 11 200 144 11 197 138 11 192 133 9
34119-180 123 7 190 142 34 190 178 144 187 187 187
34120-202 202 202 221 221 221 214 214 214 66 66 66
34121- 2 2 6 2 2 6 50 50 50 62 62 62
34122- 6 6 6 2 2 6 10 10 10 90 90 90
34123- 50 50 50 18 18 18 6 6 6 0 0 0
34124- 0 0 0 0 0 0 0 0 0 0 0 0
34125- 0 0 0 0 0 0 0 0 0 0 0 0
34126- 0 0 0 0 0 0 0 0 0 0 0 0
34127- 0 0 0 0 0 0 0 0 0 0 0 0
34128- 0 0 0 0 0 0 0 0 0 0 0 0
34129- 0 0 0 0 0 0 0 0 0 0 0 0
34130- 0 0 0 0 0 0 0 0 0 0 0 0
34131- 0 0 0 0 0 0 0 0 0 0 0 0
34132- 0 0 0 0 0 0 0 0 0 0 0 0
34133- 0 0 0 0 0 0 0 0 0 0 0 0
34134- 0 0 0 0 0 0 0 0 0 0 0 0
34135- 0 0 0 0 0 0 10 10 10 34 34 34
34136- 74 74 74 74 74 74 2 2 6 6 6 6
34137-144 144 144 198 198 198 190 190 190 178 166 146
34138-154 121 60 156 107 11 156 107 11 168 124 44
34139-174 154 114 187 187 187 190 190 190 210 210 210
34140-246 246 246 253 253 253 253 253 253 182 182 182
34141- 6 6 6 2 2 6 2 2 6 2 2 6
34142- 2 2 6 2 2 6 2 2 6 62 62 62
34143- 74 74 74 34 34 34 14 14 14 0 0 0
34144- 0 0 0 0 0 0 0 0 0 0 0 0
34145- 0 0 0 0 0 0 0 0 0 0 0 0
34146- 0 0 0 0 0 0 0 0 0 0 0 0
34147- 0 0 0 0 0 0 0 0 0 0 0 0
34148- 0 0 0 0 0 0 0 0 0 0 0 0
34149- 0 0 0 0 0 0 0 0 0 0 0 0
34150- 0 0 0 0 0 0 0 0 0 0 0 0
34151- 0 0 0 0 0 0 0 0 0 0 0 0
34152- 0 0 0 0 0 0 0 0 0 0 0 0
34153- 0 0 0 0 0 0 0 0 0 0 0 0
34154- 0 0 0 0 0 0 0 0 0 0 0 0
34155- 0 0 0 10 10 10 22 22 22 54 54 54
34156- 94 94 94 18 18 18 2 2 6 46 46 46
34157-234 234 234 221 221 221 190 190 190 190 190 190
34158-190 190 190 187 187 187 187 187 187 190 190 190
34159-190 190 190 195 195 195 214 214 214 242 242 242
34160-253 253 253 253 253 253 253 253 253 253 253 253
34161- 82 82 82 2 2 6 2 2 6 2 2 6
34162- 2 2 6 2 2 6 2 2 6 14 14 14
34163- 86 86 86 54 54 54 22 22 22 6 6 6
34164- 0 0 0 0 0 0 0 0 0 0 0 0
34165- 0 0 0 0 0 0 0 0 0 0 0 0
34166- 0 0 0 0 0 0 0 0 0 0 0 0
34167- 0 0 0 0 0 0 0 0 0 0 0 0
34168- 0 0 0 0 0 0 0 0 0 0 0 0
34169- 0 0 0 0 0 0 0 0 0 0 0 0
34170- 0 0 0 0 0 0 0 0 0 0 0 0
34171- 0 0 0 0 0 0 0 0 0 0 0 0
34172- 0 0 0 0 0 0 0 0 0 0 0 0
34173- 0 0 0 0 0 0 0 0 0 0 0 0
34174- 0 0 0 0 0 0 0 0 0 0 0 0
34175- 6 6 6 18 18 18 46 46 46 90 90 90
34176- 46 46 46 18 18 18 6 6 6 182 182 182
34177-253 253 253 246 246 246 206 206 206 190 190 190
34178-190 190 190 190 190 190 190 190 190 190 190 190
34179-206 206 206 231 231 231 250 250 250 253 253 253
34180-253 253 253 253 253 253 253 253 253 253 253 253
34181-202 202 202 14 14 14 2 2 6 2 2 6
34182- 2 2 6 2 2 6 2 2 6 2 2 6
34183- 42 42 42 86 86 86 42 42 42 18 18 18
34184- 6 6 6 0 0 0 0 0 0 0 0 0
34185- 0 0 0 0 0 0 0 0 0 0 0 0
34186- 0 0 0 0 0 0 0 0 0 0 0 0
34187- 0 0 0 0 0 0 0 0 0 0 0 0
34188- 0 0 0 0 0 0 0 0 0 0 0 0
34189- 0 0 0 0 0 0 0 0 0 0 0 0
34190- 0 0 0 0 0 0 0 0 0 0 0 0
34191- 0 0 0 0 0 0 0 0 0 0 0 0
34192- 0 0 0 0 0 0 0 0 0 0 0 0
34193- 0 0 0 0 0 0 0 0 0 0 0 0
34194- 0 0 0 0 0 0 0 0 0 6 6 6
34195- 14 14 14 38 38 38 74 74 74 66 66 66
34196- 2 2 6 6 6 6 90 90 90 250 250 250
34197-253 253 253 253 253 253 238 238 238 198 198 198
34198-190 190 190 190 190 190 195 195 195 221 221 221
34199-246 246 246 253 253 253 253 253 253 253 253 253
34200-253 253 253 253 253 253 253 253 253 253 253 253
34201-253 253 253 82 82 82 2 2 6 2 2 6
34202- 2 2 6 2 2 6 2 2 6 2 2 6
34203- 2 2 6 78 78 78 70 70 70 34 34 34
34204- 14 14 14 6 6 6 0 0 0 0 0 0
34205- 0 0 0 0 0 0 0 0 0 0 0 0
34206- 0 0 0 0 0 0 0 0 0 0 0 0
34207- 0 0 0 0 0 0 0 0 0 0 0 0
34208- 0 0 0 0 0 0 0 0 0 0 0 0
34209- 0 0 0 0 0 0 0 0 0 0 0 0
34210- 0 0 0 0 0 0 0 0 0 0 0 0
34211- 0 0 0 0 0 0 0 0 0 0 0 0
34212- 0 0 0 0 0 0 0 0 0 0 0 0
34213- 0 0 0 0 0 0 0 0 0 0 0 0
34214- 0 0 0 0 0 0 0 0 0 14 14 14
34215- 34 34 34 66 66 66 78 78 78 6 6 6
34216- 2 2 6 18 18 18 218 218 218 253 253 253
34217-253 253 253 253 253 253 253 253 253 246 246 246
34218-226 226 226 231 231 231 246 246 246 253 253 253
34219-253 253 253 253 253 253 253 253 253 253 253 253
34220-253 253 253 253 253 253 253 253 253 253 253 253
34221-253 253 253 178 178 178 2 2 6 2 2 6
34222- 2 2 6 2 2 6 2 2 6 2 2 6
34223- 2 2 6 18 18 18 90 90 90 62 62 62
34224- 30 30 30 10 10 10 0 0 0 0 0 0
34225- 0 0 0 0 0 0 0 0 0 0 0 0
34226- 0 0 0 0 0 0 0 0 0 0 0 0
34227- 0 0 0 0 0 0 0 0 0 0 0 0
34228- 0 0 0 0 0 0 0 0 0 0 0 0
34229- 0 0 0 0 0 0 0 0 0 0 0 0
34230- 0 0 0 0 0 0 0 0 0 0 0 0
34231- 0 0 0 0 0 0 0 0 0 0 0 0
34232- 0 0 0 0 0 0 0 0 0 0 0 0
34233- 0 0 0 0 0 0 0 0 0 0 0 0
34234- 0 0 0 0 0 0 10 10 10 26 26 26
34235- 58 58 58 90 90 90 18 18 18 2 2 6
34236- 2 2 6 110 110 110 253 253 253 253 253 253
34237-253 253 253 253 253 253 253 253 253 253 253 253
34238-250 250 250 253 253 253 253 253 253 253 253 253
34239-253 253 253 253 253 253 253 253 253 253 253 253
34240-253 253 253 253 253 253 253 253 253 253 253 253
34241-253 253 253 231 231 231 18 18 18 2 2 6
34242- 2 2 6 2 2 6 2 2 6 2 2 6
34243- 2 2 6 2 2 6 18 18 18 94 94 94
34244- 54 54 54 26 26 26 10 10 10 0 0 0
34245- 0 0 0 0 0 0 0 0 0 0 0 0
34246- 0 0 0 0 0 0 0 0 0 0 0 0
34247- 0 0 0 0 0 0 0 0 0 0 0 0
34248- 0 0 0 0 0 0 0 0 0 0 0 0
34249- 0 0 0 0 0 0 0 0 0 0 0 0
34250- 0 0 0 0 0 0 0 0 0 0 0 0
34251- 0 0 0 0 0 0 0 0 0 0 0 0
34252- 0 0 0 0 0 0 0 0 0 0 0 0
34253- 0 0 0 0 0 0 0 0 0 0 0 0
34254- 0 0 0 6 6 6 22 22 22 50 50 50
34255- 90 90 90 26 26 26 2 2 6 2 2 6
34256- 14 14 14 195 195 195 250 250 250 253 253 253
34257-253 253 253 253 253 253 253 253 253 253 253 253
34258-253 253 253 253 253 253 253 253 253 253 253 253
34259-253 253 253 253 253 253 253 253 253 253 253 253
34260-253 253 253 253 253 253 253 253 253 253 253 253
34261-250 250 250 242 242 242 54 54 54 2 2 6
34262- 2 2 6 2 2 6 2 2 6 2 2 6
34263- 2 2 6 2 2 6 2 2 6 38 38 38
34264- 86 86 86 50 50 50 22 22 22 6 6 6
34265- 0 0 0 0 0 0 0 0 0 0 0 0
34266- 0 0 0 0 0 0 0 0 0 0 0 0
34267- 0 0 0 0 0 0 0 0 0 0 0 0
34268- 0 0 0 0 0 0 0 0 0 0 0 0
34269- 0 0 0 0 0 0 0 0 0 0 0 0
34270- 0 0 0 0 0 0 0 0 0 0 0 0
34271- 0 0 0 0 0 0 0 0 0 0 0 0
34272- 0 0 0 0 0 0 0 0 0 0 0 0
34273- 0 0 0 0 0 0 0 0 0 0 0 0
34274- 6 6 6 14 14 14 38 38 38 82 82 82
34275- 34 34 34 2 2 6 2 2 6 2 2 6
34276- 42 42 42 195 195 195 246 246 246 253 253 253
34277-253 253 253 253 253 253 253 253 253 250 250 250
34278-242 242 242 242 242 242 250 250 250 253 253 253
34279-253 253 253 253 253 253 253 253 253 253 253 253
34280-253 253 253 250 250 250 246 246 246 238 238 238
34281-226 226 226 231 231 231 101 101 101 6 6 6
34282- 2 2 6 2 2 6 2 2 6 2 2 6
34283- 2 2 6 2 2 6 2 2 6 2 2 6
34284- 38 38 38 82 82 82 42 42 42 14 14 14
34285- 6 6 6 0 0 0 0 0 0 0 0 0
34286- 0 0 0 0 0 0 0 0 0 0 0 0
34287- 0 0 0 0 0 0 0 0 0 0 0 0
34288- 0 0 0 0 0 0 0 0 0 0 0 0
34289- 0 0 0 0 0 0 0 0 0 0 0 0
34290- 0 0 0 0 0 0 0 0 0 0 0 0
34291- 0 0 0 0 0 0 0 0 0 0 0 0
34292- 0 0 0 0 0 0 0 0 0 0 0 0
34293- 0 0 0 0 0 0 0 0 0 0 0 0
34294- 10 10 10 26 26 26 62 62 62 66 66 66
34295- 2 2 6 2 2 6 2 2 6 6 6 6
34296- 70 70 70 170 170 170 206 206 206 234 234 234
34297-246 246 246 250 250 250 250 250 250 238 238 238
34298-226 226 226 231 231 231 238 238 238 250 250 250
34299-250 250 250 250 250 250 246 246 246 231 231 231
34300-214 214 214 206 206 206 202 202 202 202 202 202
34301-198 198 198 202 202 202 182 182 182 18 18 18
34302- 2 2 6 2 2 6 2 2 6 2 2 6
34303- 2 2 6 2 2 6 2 2 6 2 2 6
34304- 2 2 6 62 62 62 66 66 66 30 30 30
34305- 10 10 10 0 0 0 0 0 0 0 0 0
34306- 0 0 0 0 0 0 0 0 0 0 0 0
34307- 0 0 0 0 0 0 0 0 0 0 0 0
34308- 0 0 0 0 0 0 0 0 0 0 0 0
34309- 0 0 0 0 0 0 0 0 0 0 0 0
34310- 0 0 0 0 0 0 0 0 0 0 0 0
34311- 0 0 0 0 0 0 0 0 0 0 0 0
34312- 0 0 0 0 0 0 0 0 0 0 0 0
34313- 0 0 0 0 0 0 0 0 0 0 0 0
34314- 14 14 14 42 42 42 82 82 82 18 18 18
34315- 2 2 6 2 2 6 2 2 6 10 10 10
34316- 94 94 94 182 182 182 218 218 218 242 242 242
34317-250 250 250 253 253 253 253 253 253 250 250 250
34318-234 234 234 253 253 253 253 253 253 253 253 253
34319-253 253 253 253 253 253 253 253 253 246 246 246
34320-238 238 238 226 226 226 210 210 210 202 202 202
34321-195 195 195 195 195 195 210 210 210 158 158 158
34322- 6 6 6 14 14 14 50 50 50 14 14 14
34323- 2 2 6 2 2 6 2 2 6 2 2 6
34324- 2 2 6 6 6 6 86 86 86 46 46 46
34325- 18 18 18 6 6 6 0 0 0 0 0 0
34326- 0 0 0 0 0 0 0 0 0 0 0 0
34327- 0 0 0 0 0 0 0 0 0 0 0 0
34328- 0 0 0 0 0 0 0 0 0 0 0 0
34329- 0 0 0 0 0 0 0 0 0 0 0 0
34330- 0 0 0 0 0 0 0 0 0 0 0 0
34331- 0 0 0 0 0 0 0 0 0 0 0 0
34332- 0 0 0 0 0 0 0 0 0 0 0 0
34333- 0 0 0 0 0 0 0 0 0 6 6 6
34334- 22 22 22 54 54 54 70 70 70 2 2 6
34335- 2 2 6 10 10 10 2 2 6 22 22 22
34336-166 166 166 231 231 231 250 250 250 253 253 253
34337-253 253 253 253 253 253 253 253 253 250 250 250
34338-242 242 242 253 253 253 253 253 253 253 253 253
34339-253 253 253 253 253 253 253 253 253 253 253 253
34340-253 253 253 253 253 253 253 253 253 246 246 246
34341-231 231 231 206 206 206 198 198 198 226 226 226
34342- 94 94 94 2 2 6 6 6 6 38 38 38
34343- 30 30 30 2 2 6 2 2 6 2 2 6
34344- 2 2 6 2 2 6 62 62 62 66 66 66
34345- 26 26 26 10 10 10 0 0 0 0 0 0
34346- 0 0 0 0 0 0 0 0 0 0 0 0
34347- 0 0 0 0 0 0 0 0 0 0 0 0
34348- 0 0 0 0 0 0 0 0 0 0 0 0
34349- 0 0 0 0 0 0 0 0 0 0 0 0
34350- 0 0 0 0 0 0 0 0 0 0 0 0
34351- 0 0 0 0 0 0 0 0 0 0 0 0
34352- 0 0 0 0 0 0 0 0 0 0 0 0
34353- 0 0 0 0 0 0 0 0 0 10 10 10
34354- 30 30 30 74 74 74 50 50 50 2 2 6
34355- 26 26 26 26 26 26 2 2 6 106 106 106
34356-238 238 238 253 253 253 253 253 253 253 253 253
34357-253 253 253 253 253 253 253 253 253 253 253 253
34358-253 253 253 253 253 253 253 253 253 253 253 253
34359-253 253 253 253 253 253 253 253 253 253 253 253
34360-253 253 253 253 253 253 253 253 253 253 253 253
34361-253 253 253 246 246 246 218 218 218 202 202 202
34362-210 210 210 14 14 14 2 2 6 2 2 6
34363- 30 30 30 22 22 22 2 2 6 2 2 6
34364- 2 2 6 2 2 6 18 18 18 86 86 86
34365- 42 42 42 14 14 14 0 0 0 0 0 0
34366- 0 0 0 0 0 0 0 0 0 0 0 0
34367- 0 0 0 0 0 0 0 0 0 0 0 0
34368- 0 0 0 0 0 0 0 0 0 0 0 0
34369- 0 0 0 0 0 0 0 0 0 0 0 0
34370- 0 0 0 0 0 0 0 0 0 0 0 0
34371- 0 0 0 0 0 0 0 0 0 0 0 0
34372- 0 0 0 0 0 0 0 0 0 0 0 0
34373- 0 0 0 0 0 0 0 0 0 14 14 14
34374- 42 42 42 90 90 90 22 22 22 2 2 6
34375- 42 42 42 2 2 6 18 18 18 218 218 218
34376-253 253 253 253 253 253 253 253 253 253 253 253
34377-253 253 253 253 253 253 253 253 253 253 253 253
34378-253 253 253 253 253 253 253 253 253 253 253 253
34379-253 253 253 253 253 253 253 253 253 253 253 253
34380-253 253 253 253 253 253 253 253 253 253 253 253
34381-253 253 253 253 253 253 250 250 250 221 221 221
34382-218 218 218 101 101 101 2 2 6 14 14 14
34383- 18 18 18 38 38 38 10 10 10 2 2 6
34384- 2 2 6 2 2 6 2 2 6 78 78 78
34385- 58 58 58 22 22 22 6 6 6 0 0 0
34386- 0 0 0 0 0 0 0 0 0 0 0 0
34387- 0 0 0 0 0 0 0 0 0 0 0 0
34388- 0 0 0 0 0 0 0 0 0 0 0 0
34389- 0 0 0 0 0 0 0 0 0 0 0 0
34390- 0 0 0 0 0 0 0 0 0 0 0 0
34391- 0 0 0 0 0 0 0 0 0 0 0 0
34392- 0 0 0 0 0 0 0 0 0 0 0 0
34393- 0 0 0 0 0 0 6 6 6 18 18 18
34394- 54 54 54 82 82 82 2 2 6 26 26 26
34395- 22 22 22 2 2 6 123 123 123 253 253 253
34396-253 253 253 253 253 253 253 253 253 253 253 253
34397-253 253 253 253 253 253 253 253 253 253 253 253
34398-253 253 253 253 253 253 253 253 253 253 253 253
34399-253 253 253 253 253 253 253 253 253 253 253 253
34400-253 253 253 253 253 253 253 253 253 253 253 253
34401-253 253 253 253 253 253 253 253 253 250 250 250
34402-238 238 238 198 198 198 6 6 6 38 38 38
34403- 58 58 58 26 26 26 38 38 38 2 2 6
34404- 2 2 6 2 2 6 2 2 6 46 46 46
34405- 78 78 78 30 30 30 10 10 10 0 0 0
34406- 0 0 0 0 0 0 0 0 0 0 0 0
34407- 0 0 0 0 0 0 0 0 0 0 0 0
34408- 0 0 0 0 0 0 0 0 0 0 0 0
34409- 0 0 0 0 0 0 0 0 0 0 0 0
34410- 0 0 0 0 0 0 0 0 0 0 0 0
34411- 0 0 0 0 0 0 0 0 0 0 0 0
34412- 0 0 0 0 0 0 0 0 0 0 0 0
34413- 0 0 0 0 0 0 10 10 10 30 30 30
34414- 74 74 74 58 58 58 2 2 6 42 42 42
34415- 2 2 6 22 22 22 231 231 231 253 253 253
34416-253 253 253 253 253 253 253 253 253 253 253 253
34417-253 253 253 253 253 253 253 253 253 250 250 250
34418-253 253 253 253 253 253 253 253 253 253 253 253
34419-253 253 253 253 253 253 253 253 253 253 253 253
34420-253 253 253 253 253 253 253 253 253 253 253 253
34421-253 253 253 253 253 253 253 253 253 253 253 253
34422-253 253 253 246 246 246 46 46 46 38 38 38
34423- 42 42 42 14 14 14 38 38 38 14 14 14
34424- 2 2 6 2 2 6 2 2 6 6 6 6
34425- 86 86 86 46 46 46 14 14 14 0 0 0
34426- 0 0 0 0 0 0 0 0 0 0 0 0
34427- 0 0 0 0 0 0 0 0 0 0 0 0
34428- 0 0 0 0 0 0 0 0 0 0 0 0
34429- 0 0 0 0 0 0 0 0 0 0 0 0
34430- 0 0 0 0 0 0 0 0 0 0 0 0
34431- 0 0 0 0 0 0 0 0 0 0 0 0
34432- 0 0 0 0 0 0 0 0 0 0 0 0
34433- 0 0 0 6 6 6 14 14 14 42 42 42
34434- 90 90 90 18 18 18 18 18 18 26 26 26
34435- 2 2 6 116 116 116 253 253 253 253 253 253
34436-253 253 253 253 253 253 253 253 253 253 253 253
34437-253 253 253 253 253 253 250 250 250 238 238 238
34438-253 253 253 253 253 253 253 253 253 253 253 253
34439-253 253 253 253 253 253 253 253 253 253 253 253
34440-253 253 253 253 253 253 253 253 253 253 253 253
34441-253 253 253 253 253 253 253 253 253 253 253 253
34442-253 253 253 253 253 253 94 94 94 6 6 6
34443- 2 2 6 2 2 6 10 10 10 34 34 34
34444- 2 2 6 2 2 6 2 2 6 2 2 6
34445- 74 74 74 58 58 58 22 22 22 6 6 6
34446- 0 0 0 0 0 0 0 0 0 0 0 0
34447- 0 0 0 0 0 0 0 0 0 0 0 0
34448- 0 0 0 0 0 0 0 0 0 0 0 0
34449- 0 0 0 0 0 0 0 0 0 0 0 0
34450- 0 0 0 0 0 0 0 0 0 0 0 0
34451- 0 0 0 0 0 0 0 0 0 0 0 0
34452- 0 0 0 0 0 0 0 0 0 0 0 0
34453- 0 0 0 10 10 10 26 26 26 66 66 66
34454- 82 82 82 2 2 6 38 38 38 6 6 6
34455- 14 14 14 210 210 210 253 253 253 253 253 253
34456-253 253 253 253 253 253 253 253 253 253 253 253
34457-253 253 253 253 253 253 246 246 246 242 242 242
34458-253 253 253 253 253 253 253 253 253 253 253 253
34459-253 253 253 253 253 253 253 253 253 253 253 253
34460-253 253 253 253 253 253 253 253 253 253 253 253
34461-253 253 253 253 253 253 253 253 253 253 253 253
34462-253 253 253 253 253 253 144 144 144 2 2 6
34463- 2 2 6 2 2 6 2 2 6 46 46 46
34464- 2 2 6 2 2 6 2 2 6 2 2 6
34465- 42 42 42 74 74 74 30 30 30 10 10 10
34466- 0 0 0 0 0 0 0 0 0 0 0 0
34467- 0 0 0 0 0 0 0 0 0 0 0 0
34468- 0 0 0 0 0 0 0 0 0 0 0 0
34469- 0 0 0 0 0 0 0 0 0 0 0 0
34470- 0 0 0 0 0 0 0 0 0 0 0 0
34471- 0 0 0 0 0 0 0 0 0 0 0 0
34472- 0 0 0 0 0 0 0 0 0 0 0 0
34473- 6 6 6 14 14 14 42 42 42 90 90 90
34474- 26 26 26 6 6 6 42 42 42 2 2 6
34475- 74 74 74 250 250 250 253 253 253 253 253 253
34476-253 253 253 253 253 253 253 253 253 253 253 253
34477-253 253 253 253 253 253 242 242 242 242 242 242
34478-253 253 253 253 253 253 253 253 253 253 253 253
34479-253 253 253 253 253 253 253 253 253 253 253 253
34480-253 253 253 253 253 253 253 253 253 253 253 253
34481-253 253 253 253 253 253 253 253 253 253 253 253
34482-253 253 253 253 253 253 182 182 182 2 2 6
34483- 2 2 6 2 2 6 2 2 6 46 46 46
34484- 2 2 6 2 2 6 2 2 6 2 2 6
34485- 10 10 10 86 86 86 38 38 38 10 10 10
34486- 0 0 0 0 0 0 0 0 0 0 0 0
34487- 0 0 0 0 0 0 0 0 0 0 0 0
34488- 0 0 0 0 0 0 0 0 0 0 0 0
34489- 0 0 0 0 0 0 0 0 0 0 0 0
34490- 0 0 0 0 0 0 0 0 0 0 0 0
34491- 0 0 0 0 0 0 0 0 0 0 0 0
34492- 0 0 0 0 0 0 0 0 0 0 0 0
34493- 10 10 10 26 26 26 66 66 66 82 82 82
34494- 2 2 6 22 22 22 18 18 18 2 2 6
34495-149 149 149 253 253 253 253 253 253 253 253 253
34496-253 253 253 253 253 253 253 253 253 253 253 253
34497-253 253 253 253 253 253 234 234 234 242 242 242
34498-253 253 253 253 253 253 253 253 253 253 253 253
34499-253 253 253 253 253 253 253 253 253 253 253 253
34500-253 253 253 253 253 253 253 253 253 253 253 253
34501-253 253 253 253 253 253 253 253 253 253 253 253
34502-253 253 253 253 253 253 206 206 206 2 2 6
34503- 2 2 6 2 2 6 2 2 6 38 38 38
34504- 2 2 6 2 2 6 2 2 6 2 2 6
34505- 6 6 6 86 86 86 46 46 46 14 14 14
34506- 0 0 0 0 0 0 0 0 0 0 0 0
34507- 0 0 0 0 0 0 0 0 0 0 0 0
34508- 0 0 0 0 0 0 0 0 0 0 0 0
34509- 0 0 0 0 0 0 0 0 0 0 0 0
34510- 0 0 0 0 0 0 0 0 0 0 0 0
34511- 0 0 0 0 0 0 0 0 0 0 0 0
34512- 0 0 0 0 0 0 0 0 0 6 6 6
34513- 18 18 18 46 46 46 86 86 86 18 18 18
34514- 2 2 6 34 34 34 10 10 10 6 6 6
34515-210 210 210 253 253 253 253 253 253 253 253 253
34516-253 253 253 253 253 253 253 253 253 253 253 253
34517-253 253 253 253 253 253 234 234 234 242 242 242
34518-253 253 253 253 253 253 253 253 253 253 253 253
34519-253 253 253 253 253 253 253 253 253 253 253 253
34520-253 253 253 253 253 253 253 253 253 253 253 253
34521-253 253 253 253 253 253 253 253 253 253 253 253
34522-253 253 253 253 253 253 221 221 221 6 6 6
34523- 2 2 6 2 2 6 6 6 6 30 30 30
34524- 2 2 6 2 2 6 2 2 6 2 2 6
34525- 2 2 6 82 82 82 54 54 54 18 18 18
34526- 6 6 6 0 0 0 0 0 0 0 0 0
34527- 0 0 0 0 0 0 0 0 0 0 0 0
34528- 0 0 0 0 0 0 0 0 0 0 0 0
34529- 0 0 0 0 0 0 0 0 0 0 0 0
34530- 0 0 0 0 0 0 0 0 0 0 0 0
34531- 0 0 0 0 0 0 0 0 0 0 0 0
34532- 0 0 0 0 0 0 0 0 0 10 10 10
34533- 26 26 26 66 66 66 62 62 62 2 2 6
34534- 2 2 6 38 38 38 10 10 10 26 26 26
34535-238 238 238 253 253 253 253 253 253 253 253 253
34536-253 253 253 253 253 253 253 253 253 253 253 253
34537-253 253 253 253 253 253 231 231 231 238 238 238
34538-253 253 253 253 253 253 253 253 253 253 253 253
34539-253 253 253 253 253 253 253 253 253 253 253 253
34540-253 253 253 253 253 253 253 253 253 253 253 253
34541-253 253 253 253 253 253 253 253 253 253 253 253
34542-253 253 253 253 253 253 231 231 231 6 6 6
34543- 2 2 6 2 2 6 10 10 10 30 30 30
34544- 2 2 6 2 2 6 2 2 6 2 2 6
34545- 2 2 6 66 66 66 58 58 58 22 22 22
34546- 6 6 6 0 0 0 0 0 0 0 0 0
34547- 0 0 0 0 0 0 0 0 0 0 0 0
34548- 0 0 0 0 0 0 0 0 0 0 0 0
34549- 0 0 0 0 0 0 0 0 0 0 0 0
34550- 0 0 0 0 0 0 0 0 0 0 0 0
34551- 0 0 0 0 0 0 0 0 0 0 0 0
34552- 0 0 0 0 0 0 0 0 0 10 10 10
34553- 38 38 38 78 78 78 6 6 6 2 2 6
34554- 2 2 6 46 46 46 14 14 14 42 42 42
34555-246 246 246 253 253 253 253 253 253 253 253 253
34556-253 253 253 253 253 253 253 253 253 253 253 253
34557-253 253 253 253 253 253 231 231 231 242 242 242
34558-253 253 253 253 253 253 253 253 253 253 253 253
34559-253 253 253 253 253 253 253 253 253 253 253 253
34560-253 253 253 253 253 253 253 253 253 253 253 253
34561-253 253 253 253 253 253 253 253 253 253 253 253
34562-253 253 253 253 253 253 234 234 234 10 10 10
34563- 2 2 6 2 2 6 22 22 22 14 14 14
34564- 2 2 6 2 2 6 2 2 6 2 2 6
34565- 2 2 6 66 66 66 62 62 62 22 22 22
34566- 6 6 6 0 0 0 0 0 0 0 0 0
34567- 0 0 0 0 0 0 0 0 0 0 0 0
34568- 0 0 0 0 0 0 0 0 0 0 0 0
34569- 0 0 0 0 0 0 0 0 0 0 0 0
34570- 0 0 0 0 0 0 0 0 0 0 0 0
34571- 0 0 0 0 0 0 0 0 0 0 0 0
34572- 0 0 0 0 0 0 6 6 6 18 18 18
34573- 50 50 50 74 74 74 2 2 6 2 2 6
34574- 14 14 14 70 70 70 34 34 34 62 62 62
34575-250 250 250 253 253 253 253 253 253 253 253 253
34576-253 253 253 253 253 253 253 253 253 253 253 253
34577-253 253 253 253 253 253 231 231 231 246 246 246
34578-253 253 253 253 253 253 253 253 253 253 253 253
34579-253 253 253 253 253 253 253 253 253 253 253 253
34580-253 253 253 253 253 253 253 253 253 253 253 253
34581-253 253 253 253 253 253 253 253 253 253 253 253
34582-253 253 253 253 253 253 234 234 234 14 14 14
34583- 2 2 6 2 2 6 30 30 30 2 2 6
34584- 2 2 6 2 2 6 2 2 6 2 2 6
34585- 2 2 6 66 66 66 62 62 62 22 22 22
34586- 6 6 6 0 0 0 0 0 0 0 0 0
34587- 0 0 0 0 0 0 0 0 0 0 0 0
34588- 0 0 0 0 0 0 0 0 0 0 0 0
34589- 0 0 0 0 0 0 0 0 0 0 0 0
34590- 0 0 0 0 0 0 0 0 0 0 0 0
34591- 0 0 0 0 0 0 0 0 0 0 0 0
34592- 0 0 0 0 0 0 6 6 6 18 18 18
34593- 54 54 54 62 62 62 2 2 6 2 2 6
34594- 2 2 6 30 30 30 46 46 46 70 70 70
34595-250 250 250 253 253 253 253 253 253 253 253 253
34596-253 253 253 253 253 253 253 253 253 253 253 253
34597-253 253 253 253 253 253 231 231 231 246 246 246
34598-253 253 253 253 253 253 253 253 253 253 253 253
34599-253 253 253 253 253 253 253 253 253 253 253 253
34600-253 253 253 253 253 253 253 253 253 253 253 253
34601-253 253 253 253 253 253 253 253 253 253 253 253
34602-253 253 253 253 253 253 226 226 226 10 10 10
34603- 2 2 6 6 6 6 30 30 30 2 2 6
34604- 2 2 6 2 2 6 2 2 6 2 2 6
34605- 2 2 6 66 66 66 58 58 58 22 22 22
34606- 6 6 6 0 0 0 0 0 0 0 0 0
34607- 0 0 0 0 0 0 0 0 0 0 0 0
34608- 0 0 0 0 0 0 0 0 0 0 0 0
34609- 0 0 0 0 0 0 0 0 0 0 0 0
34610- 0 0 0 0 0 0 0 0 0 0 0 0
34611- 0 0 0 0 0 0 0 0 0 0 0 0
34612- 0 0 0 0 0 0 6 6 6 22 22 22
34613- 58 58 58 62 62 62 2 2 6 2 2 6
34614- 2 2 6 2 2 6 30 30 30 78 78 78
34615-250 250 250 253 253 253 253 253 253 253 253 253
34616-253 253 253 253 253 253 253 253 253 253 253 253
34617-253 253 253 253 253 253 231 231 231 246 246 246
34618-253 253 253 253 253 253 253 253 253 253 253 253
34619-253 253 253 253 253 253 253 253 253 253 253 253
34620-253 253 253 253 253 253 253 253 253 253 253 253
34621-253 253 253 253 253 253 253 253 253 253 253 253
34622-253 253 253 253 253 253 206 206 206 2 2 6
34623- 22 22 22 34 34 34 18 14 6 22 22 22
34624- 26 26 26 18 18 18 6 6 6 2 2 6
34625- 2 2 6 82 82 82 54 54 54 18 18 18
34626- 6 6 6 0 0 0 0 0 0 0 0 0
34627- 0 0 0 0 0 0 0 0 0 0 0 0
34628- 0 0 0 0 0 0 0 0 0 0 0 0
34629- 0 0 0 0 0 0 0 0 0 0 0 0
34630- 0 0 0 0 0 0 0 0 0 0 0 0
34631- 0 0 0 0 0 0 0 0 0 0 0 0
34632- 0 0 0 0 0 0 6 6 6 26 26 26
34633- 62 62 62 106 106 106 74 54 14 185 133 11
34634-210 162 10 121 92 8 6 6 6 62 62 62
34635-238 238 238 253 253 253 253 253 253 253 253 253
34636-253 253 253 253 253 253 253 253 253 253 253 253
34637-253 253 253 253 253 253 231 231 231 246 246 246
34638-253 253 253 253 253 253 253 253 253 253 253 253
34639-253 253 253 253 253 253 253 253 253 253 253 253
34640-253 253 253 253 253 253 253 253 253 253 253 253
34641-253 253 253 253 253 253 253 253 253 253 253 253
34642-253 253 253 253 253 253 158 158 158 18 18 18
34643- 14 14 14 2 2 6 2 2 6 2 2 6
34644- 6 6 6 18 18 18 66 66 66 38 38 38
34645- 6 6 6 94 94 94 50 50 50 18 18 18
34646- 6 6 6 0 0 0 0 0 0 0 0 0
34647- 0 0 0 0 0 0 0 0 0 0 0 0
34648- 0 0 0 0 0 0 0 0 0 0 0 0
34649- 0 0 0 0 0 0 0 0 0 0 0 0
34650- 0 0 0 0 0 0 0 0 0 0 0 0
34651- 0 0 0 0 0 0 0 0 0 6 6 6
34652- 10 10 10 10 10 10 18 18 18 38 38 38
34653- 78 78 78 142 134 106 216 158 10 242 186 14
34654-246 190 14 246 190 14 156 118 10 10 10 10
34655- 90 90 90 238 238 238 253 253 253 253 253 253
34656-253 253 253 253 253 253 253 253 253 253 253 253
34657-253 253 253 253 253 253 231 231 231 250 250 250
34658-253 253 253 253 253 253 253 253 253 253 253 253
34659-253 253 253 253 253 253 253 253 253 253 253 253
34660-253 253 253 253 253 253 253 253 253 253 253 253
34661-253 253 253 253 253 253 253 253 253 246 230 190
34662-238 204 91 238 204 91 181 142 44 37 26 9
34663- 2 2 6 2 2 6 2 2 6 2 2 6
34664- 2 2 6 2 2 6 38 38 38 46 46 46
34665- 26 26 26 106 106 106 54 54 54 18 18 18
34666- 6 6 6 0 0 0 0 0 0 0 0 0
34667- 0 0 0 0 0 0 0 0 0 0 0 0
34668- 0 0 0 0 0 0 0 0 0 0 0 0
34669- 0 0 0 0 0 0 0 0 0 0 0 0
34670- 0 0 0 0 0 0 0 0 0 0 0 0
34671- 0 0 0 6 6 6 14 14 14 22 22 22
34672- 30 30 30 38 38 38 50 50 50 70 70 70
34673-106 106 106 190 142 34 226 170 11 242 186 14
34674-246 190 14 246 190 14 246 190 14 154 114 10
34675- 6 6 6 74 74 74 226 226 226 253 253 253
34676-253 253 253 253 253 253 253 253 253 253 253 253
34677-253 253 253 253 253 253 231 231 231 250 250 250
34678-253 253 253 253 253 253 253 253 253 253 253 253
34679-253 253 253 253 253 253 253 253 253 253 253 253
34680-253 253 253 253 253 253 253 253 253 253 253 253
34681-253 253 253 253 253 253 253 253 253 228 184 62
34682-241 196 14 241 208 19 232 195 16 38 30 10
34683- 2 2 6 2 2 6 2 2 6 2 2 6
34684- 2 2 6 6 6 6 30 30 30 26 26 26
34685-203 166 17 154 142 90 66 66 66 26 26 26
34686- 6 6 6 0 0 0 0 0 0 0 0 0
34687- 0 0 0 0 0 0 0 0 0 0 0 0
34688- 0 0 0 0 0 0 0 0 0 0 0 0
34689- 0 0 0 0 0 0 0 0 0 0 0 0
34690- 0 0 0 0 0 0 0 0 0 0 0 0
34691- 6 6 6 18 18 18 38 38 38 58 58 58
34692- 78 78 78 86 86 86 101 101 101 123 123 123
34693-175 146 61 210 150 10 234 174 13 246 186 14
34694-246 190 14 246 190 14 246 190 14 238 190 10
34695-102 78 10 2 2 6 46 46 46 198 198 198
34696-253 253 253 253 253 253 253 253 253 253 253 253
34697-253 253 253 253 253 253 234 234 234 242 242 242
34698-253 253 253 253 253 253 253 253 253 253 253 253
34699-253 253 253 253 253 253 253 253 253 253 253 253
34700-253 253 253 253 253 253 253 253 253 253 253 253
34701-253 253 253 253 253 253 253 253 253 224 178 62
34702-242 186 14 241 196 14 210 166 10 22 18 6
34703- 2 2 6 2 2 6 2 2 6 2 2 6
34704- 2 2 6 2 2 6 6 6 6 121 92 8
34705-238 202 15 232 195 16 82 82 82 34 34 34
34706- 10 10 10 0 0 0 0 0 0 0 0 0
34707- 0 0 0 0 0 0 0 0 0 0 0 0
34708- 0 0 0 0 0 0 0 0 0 0 0 0
34709- 0 0 0 0 0 0 0 0 0 0 0 0
34710- 0 0 0 0 0 0 0 0 0 0 0 0
34711- 14 14 14 38 38 38 70 70 70 154 122 46
34712-190 142 34 200 144 11 197 138 11 197 138 11
34713-213 154 11 226 170 11 242 186 14 246 190 14
34714-246 190 14 246 190 14 246 190 14 246 190 14
34715-225 175 15 46 32 6 2 2 6 22 22 22
34716-158 158 158 250 250 250 253 253 253 253 253 253
34717-253 253 253 253 253 253 253 253 253 253 253 253
34718-253 253 253 253 253 253 253 253 253 253 253 253
34719-253 253 253 253 253 253 253 253 253 253 253 253
34720-253 253 253 253 253 253 253 253 253 253 253 253
34721-253 253 253 250 250 250 242 242 242 224 178 62
34722-239 182 13 236 186 11 213 154 11 46 32 6
34723- 2 2 6 2 2 6 2 2 6 2 2 6
34724- 2 2 6 2 2 6 61 42 6 225 175 15
34725-238 190 10 236 186 11 112 100 78 42 42 42
34726- 14 14 14 0 0 0 0 0 0 0 0 0
34727- 0 0 0 0 0 0 0 0 0 0 0 0
34728- 0 0 0 0 0 0 0 0 0 0 0 0
34729- 0 0 0 0 0 0 0 0 0 0 0 0
34730- 0 0 0 0 0 0 0 0 0 6 6 6
34731- 22 22 22 54 54 54 154 122 46 213 154 11
34732-226 170 11 230 174 11 226 170 11 226 170 11
34733-236 178 12 242 186 14 246 190 14 246 190 14
34734-246 190 14 246 190 14 246 190 14 246 190 14
34735-241 196 14 184 144 12 10 10 10 2 2 6
34736- 6 6 6 116 116 116 242 242 242 253 253 253
34737-253 253 253 253 253 253 253 253 253 253 253 253
34738-253 253 253 253 253 253 253 253 253 253 253 253
34739-253 253 253 253 253 253 253 253 253 253 253 253
34740-253 253 253 253 253 253 253 253 253 253 253 253
34741-253 253 253 231 231 231 198 198 198 214 170 54
34742-236 178 12 236 178 12 210 150 10 137 92 6
34743- 18 14 6 2 2 6 2 2 6 2 2 6
34744- 6 6 6 70 47 6 200 144 11 236 178 12
34745-239 182 13 239 182 13 124 112 88 58 58 58
34746- 22 22 22 6 6 6 0 0 0 0 0 0
34747- 0 0 0 0 0 0 0 0 0 0 0 0
34748- 0 0 0 0 0 0 0 0 0 0 0 0
34749- 0 0 0 0 0 0 0 0 0 0 0 0
34750- 0 0 0 0 0 0 0 0 0 10 10 10
34751- 30 30 30 70 70 70 180 133 36 226 170 11
34752-239 182 13 242 186 14 242 186 14 246 186 14
34753-246 190 14 246 190 14 246 190 14 246 190 14
34754-246 190 14 246 190 14 246 190 14 246 190 14
34755-246 190 14 232 195 16 98 70 6 2 2 6
34756- 2 2 6 2 2 6 66 66 66 221 221 221
34757-253 253 253 253 253 253 253 253 253 253 253 253
34758-253 253 253 253 253 253 253 253 253 253 253 253
34759-253 253 253 253 253 253 253 253 253 253 253 253
34760-253 253 253 253 253 253 253 253 253 253 253 253
34761-253 253 253 206 206 206 198 198 198 214 166 58
34762-230 174 11 230 174 11 216 158 10 192 133 9
34763-163 110 8 116 81 8 102 78 10 116 81 8
34764-167 114 7 197 138 11 226 170 11 239 182 13
34765-242 186 14 242 186 14 162 146 94 78 78 78
34766- 34 34 34 14 14 14 6 6 6 0 0 0
34767- 0 0 0 0 0 0 0 0 0 0 0 0
34768- 0 0 0 0 0 0 0 0 0 0 0 0
34769- 0 0 0 0 0 0 0 0 0 0 0 0
34770- 0 0 0 0 0 0 0 0 0 6 6 6
34771- 30 30 30 78 78 78 190 142 34 226 170 11
34772-239 182 13 246 190 14 246 190 14 246 190 14
34773-246 190 14 246 190 14 246 190 14 246 190 14
34774-246 190 14 246 190 14 246 190 14 246 190 14
34775-246 190 14 241 196 14 203 166 17 22 18 6
34776- 2 2 6 2 2 6 2 2 6 38 38 38
34777-218 218 218 253 253 253 253 253 253 253 253 253
34778-253 253 253 253 253 253 253 253 253 253 253 253
34779-253 253 253 253 253 253 253 253 253 253 253 253
34780-253 253 253 253 253 253 253 253 253 253 253 253
34781-250 250 250 206 206 206 198 198 198 202 162 69
34782-226 170 11 236 178 12 224 166 10 210 150 10
34783-200 144 11 197 138 11 192 133 9 197 138 11
34784-210 150 10 226 170 11 242 186 14 246 190 14
34785-246 190 14 246 186 14 225 175 15 124 112 88
34786- 62 62 62 30 30 30 14 14 14 6 6 6
34787- 0 0 0 0 0 0 0 0 0 0 0 0
34788- 0 0 0 0 0 0 0 0 0 0 0 0
34789- 0 0 0 0 0 0 0 0 0 0 0 0
34790- 0 0 0 0 0 0 0 0 0 10 10 10
34791- 30 30 30 78 78 78 174 135 50 224 166 10
34792-239 182 13 246 190 14 246 190 14 246 190 14
34793-246 190 14 246 190 14 246 190 14 246 190 14
34794-246 190 14 246 190 14 246 190 14 246 190 14
34795-246 190 14 246 190 14 241 196 14 139 102 15
34796- 2 2 6 2 2 6 2 2 6 2 2 6
34797- 78 78 78 250 250 250 253 253 253 253 253 253
34798-253 253 253 253 253 253 253 253 253 253 253 253
34799-253 253 253 253 253 253 253 253 253 253 253 253
34800-253 253 253 253 253 253 253 253 253 253 253 253
34801-250 250 250 214 214 214 198 198 198 190 150 46
34802-219 162 10 236 178 12 234 174 13 224 166 10
34803-216 158 10 213 154 11 213 154 11 216 158 10
34804-226 170 11 239 182 13 246 190 14 246 190 14
34805-246 190 14 246 190 14 242 186 14 206 162 42
34806-101 101 101 58 58 58 30 30 30 14 14 14
34807- 6 6 6 0 0 0 0 0 0 0 0 0
34808- 0 0 0 0 0 0 0 0 0 0 0 0
34809- 0 0 0 0 0 0 0 0 0 0 0 0
34810- 0 0 0 0 0 0 0 0 0 10 10 10
34811- 30 30 30 74 74 74 174 135 50 216 158 10
34812-236 178 12 246 190 14 246 190 14 246 190 14
34813-246 190 14 246 190 14 246 190 14 246 190 14
34814-246 190 14 246 190 14 246 190 14 246 190 14
34815-246 190 14 246 190 14 241 196 14 226 184 13
34816- 61 42 6 2 2 6 2 2 6 2 2 6
34817- 22 22 22 238 238 238 253 253 253 253 253 253
34818-253 253 253 253 253 253 253 253 253 253 253 253
34819-253 253 253 253 253 253 253 253 253 253 253 253
34820-253 253 253 253 253 253 253 253 253 253 253 253
34821-253 253 253 226 226 226 187 187 187 180 133 36
34822-216 158 10 236 178 12 239 182 13 236 178 12
34823-230 174 11 226 170 11 226 170 11 230 174 11
34824-236 178 12 242 186 14 246 190 14 246 190 14
34825-246 190 14 246 190 14 246 186 14 239 182 13
34826-206 162 42 106 106 106 66 66 66 34 34 34
34827- 14 14 14 6 6 6 0 0 0 0 0 0
34828- 0 0 0 0 0 0 0 0 0 0 0 0
34829- 0 0 0 0 0 0 0 0 0 0 0 0
34830- 0 0 0 0 0 0 0 0 0 6 6 6
34831- 26 26 26 70 70 70 163 133 67 213 154 11
34832-236 178 12 246 190 14 246 190 14 246 190 14
34833-246 190 14 246 190 14 246 190 14 246 190 14
34834-246 190 14 246 190 14 246 190 14 246 190 14
34835-246 190 14 246 190 14 246 190 14 241 196 14
34836-190 146 13 18 14 6 2 2 6 2 2 6
34837- 46 46 46 246 246 246 253 253 253 253 253 253
34838-253 253 253 253 253 253 253 253 253 253 253 253
34839-253 253 253 253 253 253 253 253 253 253 253 253
34840-253 253 253 253 253 253 253 253 253 253 253 253
34841-253 253 253 221 221 221 86 86 86 156 107 11
34842-216 158 10 236 178 12 242 186 14 246 186 14
34843-242 186 14 239 182 13 239 182 13 242 186 14
34844-242 186 14 246 186 14 246 190 14 246 190 14
34845-246 190 14 246 190 14 246 190 14 246 190 14
34846-242 186 14 225 175 15 142 122 72 66 66 66
34847- 30 30 30 10 10 10 0 0 0 0 0 0
34848- 0 0 0 0 0 0 0 0 0 0 0 0
34849- 0 0 0 0 0 0 0 0 0 0 0 0
34850- 0 0 0 0 0 0 0 0 0 6 6 6
34851- 26 26 26 70 70 70 163 133 67 210 150 10
34852-236 178 12 246 190 14 246 190 14 246 190 14
34853-246 190 14 246 190 14 246 190 14 246 190 14
34854-246 190 14 246 190 14 246 190 14 246 190 14
34855-246 190 14 246 190 14 246 190 14 246 190 14
34856-232 195 16 121 92 8 34 34 34 106 106 106
34857-221 221 221 253 253 253 253 253 253 253 253 253
34858-253 253 253 253 253 253 253 253 253 253 253 253
34859-253 253 253 253 253 253 253 253 253 253 253 253
34860-253 253 253 253 253 253 253 253 253 253 253 253
34861-242 242 242 82 82 82 18 14 6 163 110 8
34862-216 158 10 236 178 12 242 186 14 246 190 14
34863-246 190 14 246 190 14 246 190 14 246 190 14
34864-246 190 14 246 190 14 246 190 14 246 190 14
34865-246 190 14 246 190 14 246 190 14 246 190 14
34866-246 190 14 246 190 14 242 186 14 163 133 67
34867- 46 46 46 18 18 18 6 6 6 0 0 0
34868- 0 0 0 0 0 0 0 0 0 0 0 0
34869- 0 0 0 0 0 0 0 0 0 0 0 0
34870- 0 0 0 0 0 0 0 0 0 10 10 10
34871- 30 30 30 78 78 78 163 133 67 210 150 10
34872-236 178 12 246 186 14 246 190 14 246 190 14
34873-246 190 14 246 190 14 246 190 14 246 190 14
34874-246 190 14 246 190 14 246 190 14 246 190 14
34875-246 190 14 246 190 14 246 190 14 246 190 14
34876-241 196 14 215 174 15 190 178 144 253 253 253
34877-253 253 253 253 253 253 253 253 253 253 253 253
34878-253 253 253 253 253 253 253 253 253 253 253 253
34879-253 253 253 253 253 253 253 253 253 253 253 253
34880-253 253 253 253 253 253 253 253 253 218 218 218
34881- 58 58 58 2 2 6 22 18 6 167 114 7
34882-216 158 10 236 178 12 246 186 14 246 190 14
34883-246 190 14 246 190 14 246 190 14 246 190 14
34884-246 190 14 246 190 14 246 190 14 246 190 14
34885-246 190 14 246 190 14 246 190 14 246 190 14
34886-246 190 14 246 186 14 242 186 14 190 150 46
34887- 54 54 54 22 22 22 6 6 6 0 0 0
34888- 0 0 0 0 0 0 0 0 0 0 0 0
34889- 0 0 0 0 0 0 0 0 0 0 0 0
34890- 0 0 0 0 0 0 0 0 0 14 14 14
34891- 38 38 38 86 86 86 180 133 36 213 154 11
34892-236 178 12 246 186 14 246 190 14 246 190 14
34893-246 190 14 246 190 14 246 190 14 246 190 14
34894-246 190 14 246 190 14 246 190 14 246 190 14
34895-246 190 14 246 190 14 246 190 14 246 190 14
34896-246 190 14 232 195 16 190 146 13 214 214 214
34897-253 253 253 253 253 253 253 253 253 253 253 253
34898-253 253 253 253 253 253 253 253 253 253 253 253
34899-253 253 253 253 253 253 253 253 253 253 253 253
34900-253 253 253 250 250 250 170 170 170 26 26 26
34901- 2 2 6 2 2 6 37 26 9 163 110 8
34902-219 162 10 239 182 13 246 186 14 246 190 14
34903-246 190 14 246 190 14 246 190 14 246 190 14
34904-246 190 14 246 190 14 246 190 14 246 190 14
34905-246 190 14 246 190 14 246 190 14 246 190 14
34906-246 186 14 236 178 12 224 166 10 142 122 72
34907- 46 46 46 18 18 18 6 6 6 0 0 0
34908- 0 0 0 0 0 0 0 0 0 0 0 0
34909- 0 0 0 0 0 0 0 0 0 0 0 0
34910- 0 0 0 0 0 0 6 6 6 18 18 18
34911- 50 50 50 109 106 95 192 133 9 224 166 10
34912-242 186 14 246 190 14 246 190 14 246 190 14
34913-246 190 14 246 190 14 246 190 14 246 190 14
34914-246 190 14 246 190 14 246 190 14 246 190 14
34915-246 190 14 246 190 14 246 190 14 246 190 14
34916-242 186 14 226 184 13 210 162 10 142 110 46
34917-226 226 226 253 253 253 253 253 253 253 253 253
34918-253 253 253 253 253 253 253 253 253 253 253 253
34919-253 253 253 253 253 253 253 253 253 253 253 253
34920-198 198 198 66 66 66 2 2 6 2 2 6
34921- 2 2 6 2 2 6 50 34 6 156 107 11
34922-219 162 10 239 182 13 246 186 14 246 190 14
34923-246 190 14 246 190 14 246 190 14 246 190 14
34924-246 190 14 246 190 14 246 190 14 246 190 14
34925-246 190 14 246 190 14 246 190 14 242 186 14
34926-234 174 13 213 154 11 154 122 46 66 66 66
34927- 30 30 30 10 10 10 0 0 0 0 0 0
34928- 0 0 0 0 0 0 0 0 0 0 0 0
34929- 0 0 0 0 0 0 0 0 0 0 0 0
34930- 0 0 0 0 0 0 6 6 6 22 22 22
34931- 58 58 58 154 121 60 206 145 10 234 174 13
34932-242 186 14 246 186 14 246 190 14 246 190 14
34933-246 190 14 246 190 14 246 190 14 246 190 14
34934-246 190 14 246 190 14 246 190 14 246 190 14
34935-246 190 14 246 190 14 246 190 14 246 190 14
34936-246 186 14 236 178 12 210 162 10 163 110 8
34937- 61 42 6 138 138 138 218 218 218 250 250 250
34938-253 253 253 253 253 253 253 253 253 250 250 250
34939-242 242 242 210 210 210 144 144 144 66 66 66
34940- 6 6 6 2 2 6 2 2 6 2 2 6
34941- 2 2 6 2 2 6 61 42 6 163 110 8
34942-216 158 10 236 178 12 246 190 14 246 190 14
34943-246 190 14 246 190 14 246 190 14 246 190 14
34944-246 190 14 246 190 14 246 190 14 246 190 14
34945-246 190 14 239 182 13 230 174 11 216 158 10
34946-190 142 34 124 112 88 70 70 70 38 38 38
34947- 18 18 18 6 6 6 0 0 0 0 0 0
34948- 0 0 0 0 0 0 0 0 0 0 0 0
34949- 0 0 0 0 0 0 0 0 0 0 0 0
34950- 0 0 0 0 0 0 6 6 6 22 22 22
34951- 62 62 62 168 124 44 206 145 10 224 166 10
34952-236 178 12 239 182 13 242 186 14 242 186 14
34953-246 186 14 246 190 14 246 190 14 246 190 14
34954-246 190 14 246 190 14 246 190 14 246 190 14
34955-246 190 14 246 190 14 246 190 14 246 190 14
34956-246 190 14 236 178 12 216 158 10 175 118 6
34957- 80 54 7 2 2 6 6 6 6 30 30 30
34958- 54 54 54 62 62 62 50 50 50 38 38 38
34959- 14 14 14 2 2 6 2 2 6 2 2 6
34960- 2 2 6 2 2 6 2 2 6 2 2 6
34961- 2 2 6 6 6 6 80 54 7 167 114 7
34962-213 154 11 236 178 12 246 190 14 246 190 14
34963-246 190 14 246 190 14 246 190 14 246 190 14
34964-246 190 14 242 186 14 239 182 13 239 182 13
34965-230 174 11 210 150 10 174 135 50 124 112 88
34966- 82 82 82 54 54 54 34 34 34 18 18 18
34967- 6 6 6 0 0 0 0 0 0 0 0 0
34968- 0 0 0 0 0 0 0 0 0 0 0 0
34969- 0 0 0 0 0 0 0 0 0 0 0 0
34970- 0 0 0 0 0 0 6 6 6 18 18 18
34971- 50 50 50 158 118 36 192 133 9 200 144 11
34972-216 158 10 219 162 10 224 166 10 226 170 11
34973-230 174 11 236 178 12 239 182 13 239 182 13
34974-242 186 14 246 186 14 246 190 14 246 190 14
34975-246 190 14 246 190 14 246 190 14 246 190 14
34976-246 186 14 230 174 11 210 150 10 163 110 8
34977-104 69 6 10 10 10 2 2 6 2 2 6
34978- 2 2 6 2 2 6 2 2 6 2 2 6
34979- 2 2 6 2 2 6 2 2 6 2 2 6
34980- 2 2 6 2 2 6 2 2 6 2 2 6
34981- 2 2 6 6 6 6 91 60 6 167 114 7
34982-206 145 10 230 174 11 242 186 14 246 190 14
34983-246 190 14 246 190 14 246 186 14 242 186 14
34984-239 182 13 230 174 11 224 166 10 213 154 11
34985-180 133 36 124 112 88 86 86 86 58 58 58
34986- 38 38 38 22 22 22 10 10 10 6 6 6
34987- 0 0 0 0 0 0 0 0 0 0 0 0
34988- 0 0 0 0 0 0 0 0 0 0 0 0
34989- 0 0 0 0 0 0 0 0 0 0 0 0
34990- 0 0 0 0 0 0 0 0 0 14 14 14
34991- 34 34 34 70 70 70 138 110 50 158 118 36
34992-167 114 7 180 123 7 192 133 9 197 138 11
34993-200 144 11 206 145 10 213 154 11 219 162 10
34994-224 166 10 230 174 11 239 182 13 242 186 14
34995-246 186 14 246 186 14 246 186 14 246 186 14
34996-239 182 13 216 158 10 185 133 11 152 99 6
34997-104 69 6 18 14 6 2 2 6 2 2 6
34998- 2 2 6 2 2 6 2 2 6 2 2 6
34999- 2 2 6 2 2 6 2 2 6 2 2 6
35000- 2 2 6 2 2 6 2 2 6 2 2 6
35001- 2 2 6 6 6 6 80 54 7 152 99 6
35002-192 133 9 219 162 10 236 178 12 239 182 13
35003-246 186 14 242 186 14 239 182 13 236 178 12
35004-224 166 10 206 145 10 192 133 9 154 121 60
35005- 94 94 94 62 62 62 42 42 42 22 22 22
35006- 14 14 14 6 6 6 0 0 0 0 0 0
35007- 0 0 0 0 0 0 0 0 0 0 0 0
35008- 0 0 0 0 0 0 0 0 0 0 0 0
35009- 0 0 0 0 0 0 0 0 0 0 0 0
35010- 0 0 0 0 0 0 0 0 0 6 6 6
35011- 18 18 18 34 34 34 58 58 58 78 78 78
35012-101 98 89 124 112 88 142 110 46 156 107 11
35013-163 110 8 167 114 7 175 118 6 180 123 7
35014-185 133 11 197 138 11 210 150 10 219 162 10
35015-226 170 11 236 178 12 236 178 12 234 174 13
35016-219 162 10 197 138 11 163 110 8 130 83 6
35017- 91 60 6 10 10 10 2 2 6 2 2 6
35018- 18 18 18 38 38 38 38 38 38 38 38 38
35019- 38 38 38 38 38 38 38 38 38 38 38 38
35020- 38 38 38 38 38 38 26 26 26 2 2 6
35021- 2 2 6 6 6 6 70 47 6 137 92 6
35022-175 118 6 200 144 11 219 162 10 230 174 11
35023-234 174 13 230 174 11 219 162 10 210 150 10
35024-192 133 9 163 110 8 124 112 88 82 82 82
35025- 50 50 50 30 30 30 14 14 14 6 6 6
35026- 0 0 0 0 0 0 0 0 0 0 0 0
35027- 0 0 0 0 0 0 0 0 0 0 0 0
35028- 0 0 0 0 0 0 0 0 0 0 0 0
35029- 0 0 0 0 0 0 0 0 0 0 0 0
35030- 0 0 0 0 0 0 0 0 0 0 0 0
35031- 6 6 6 14 14 14 22 22 22 34 34 34
35032- 42 42 42 58 58 58 74 74 74 86 86 86
35033-101 98 89 122 102 70 130 98 46 121 87 25
35034-137 92 6 152 99 6 163 110 8 180 123 7
35035-185 133 11 197 138 11 206 145 10 200 144 11
35036-180 123 7 156 107 11 130 83 6 104 69 6
35037- 50 34 6 54 54 54 110 110 110 101 98 89
35038- 86 86 86 82 82 82 78 78 78 78 78 78
35039- 78 78 78 78 78 78 78 78 78 78 78 78
35040- 78 78 78 82 82 82 86 86 86 94 94 94
35041-106 106 106 101 101 101 86 66 34 124 80 6
35042-156 107 11 180 123 7 192 133 9 200 144 11
35043-206 145 10 200 144 11 192 133 9 175 118 6
35044-139 102 15 109 106 95 70 70 70 42 42 42
35045- 22 22 22 10 10 10 0 0 0 0 0 0
35046- 0 0 0 0 0 0 0 0 0 0 0 0
35047- 0 0 0 0 0 0 0 0 0 0 0 0
35048- 0 0 0 0 0 0 0 0 0 0 0 0
35049- 0 0 0 0 0 0 0 0 0 0 0 0
35050- 0 0 0 0 0 0 0 0 0 0 0 0
35051- 0 0 0 0 0 0 6 6 6 10 10 10
35052- 14 14 14 22 22 22 30 30 30 38 38 38
35053- 50 50 50 62 62 62 74 74 74 90 90 90
35054-101 98 89 112 100 78 121 87 25 124 80 6
35055-137 92 6 152 99 6 152 99 6 152 99 6
35056-138 86 6 124 80 6 98 70 6 86 66 30
35057-101 98 89 82 82 82 58 58 58 46 46 46
35058- 38 38 38 34 34 34 34 34 34 34 34 34
35059- 34 34 34 34 34 34 34 34 34 34 34 34
35060- 34 34 34 34 34 34 38 38 38 42 42 42
35061- 54 54 54 82 82 82 94 86 76 91 60 6
35062-134 86 6 156 107 11 167 114 7 175 118 6
35063-175 118 6 167 114 7 152 99 6 121 87 25
35064-101 98 89 62 62 62 34 34 34 18 18 18
35065- 6 6 6 0 0 0 0 0 0 0 0 0
35066- 0 0 0 0 0 0 0 0 0 0 0 0
35067- 0 0 0 0 0 0 0 0 0 0 0 0
35068- 0 0 0 0 0 0 0 0 0 0 0 0
35069- 0 0 0 0 0 0 0 0 0 0 0 0
35070- 0 0 0 0 0 0 0 0 0 0 0 0
35071- 0 0 0 0 0 0 0 0 0 0 0 0
35072- 0 0 0 6 6 6 6 6 6 10 10 10
35073- 18 18 18 22 22 22 30 30 30 42 42 42
35074- 50 50 50 66 66 66 86 86 86 101 98 89
35075-106 86 58 98 70 6 104 69 6 104 69 6
35076-104 69 6 91 60 6 82 62 34 90 90 90
35077- 62 62 62 38 38 38 22 22 22 14 14 14
35078- 10 10 10 10 10 10 10 10 10 10 10 10
35079- 10 10 10 10 10 10 6 6 6 10 10 10
35080- 10 10 10 10 10 10 10 10 10 14 14 14
35081- 22 22 22 42 42 42 70 70 70 89 81 66
35082- 80 54 7 104 69 6 124 80 6 137 92 6
35083-134 86 6 116 81 8 100 82 52 86 86 86
35084- 58 58 58 30 30 30 14 14 14 6 6 6
35085- 0 0 0 0 0 0 0 0 0 0 0 0
35086- 0 0 0 0 0 0 0 0 0 0 0 0
35087- 0 0 0 0 0 0 0 0 0 0 0 0
35088- 0 0 0 0 0 0 0 0 0 0 0 0
35089- 0 0 0 0 0 0 0 0 0 0 0 0
35090- 0 0 0 0 0 0 0 0 0 0 0 0
35091- 0 0 0 0 0 0 0 0 0 0 0 0
35092- 0 0 0 0 0 0 0 0 0 0 0 0
35093- 0 0 0 6 6 6 10 10 10 14 14 14
35094- 18 18 18 26 26 26 38 38 38 54 54 54
35095- 70 70 70 86 86 86 94 86 76 89 81 66
35096- 89 81 66 86 86 86 74 74 74 50 50 50
35097- 30 30 30 14 14 14 6 6 6 0 0 0
35098- 0 0 0 0 0 0 0 0 0 0 0 0
35099- 0 0 0 0 0 0 0 0 0 0 0 0
35100- 0 0 0 0 0 0 0 0 0 0 0 0
35101- 6 6 6 18 18 18 34 34 34 58 58 58
35102- 82 82 82 89 81 66 89 81 66 89 81 66
35103- 94 86 66 94 86 76 74 74 74 50 50 50
35104- 26 26 26 14 14 14 6 6 6 0 0 0
35105- 0 0 0 0 0 0 0 0 0 0 0 0
35106- 0 0 0 0 0 0 0 0 0 0 0 0
35107- 0 0 0 0 0 0 0 0 0 0 0 0
35108- 0 0 0 0 0 0 0 0 0 0 0 0
35109- 0 0 0 0 0 0 0 0 0 0 0 0
35110- 0 0 0 0 0 0 0 0 0 0 0 0
35111- 0 0 0 0 0 0 0 0 0 0 0 0
35112- 0 0 0 0 0 0 0 0 0 0 0 0
35113- 0 0 0 0 0 0 0 0 0 0 0 0
35114- 6 6 6 6 6 6 14 14 14 18 18 18
35115- 30 30 30 38 38 38 46 46 46 54 54 54
35116- 50 50 50 42 42 42 30 30 30 18 18 18
35117- 10 10 10 0 0 0 0 0 0 0 0 0
35118- 0 0 0 0 0 0 0 0 0 0 0 0
35119- 0 0 0 0 0 0 0 0 0 0 0 0
35120- 0 0 0 0 0 0 0 0 0 0 0 0
35121- 0 0 0 6 6 6 14 14 14 26 26 26
35122- 38 38 38 50 50 50 58 58 58 58 58 58
35123- 54 54 54 42 42 42 30 30 30 18 18 18
35124- 10 10 10 0 0 0 0 0 0 0 0 0
35125- 0 0 0 0 0 0 0 0 0 0 0 0
35126- 0 0 0 0 0 0 0 0 0 0 0 0
35127- 0 0 0 0 0 0 0 0 0 0 0 0
35128- 0 0 0 0 0 0 0 0 0 0 0 0
35129- 0 0 0 0 0 0 0 0 0 0 0 0
35130- 0 0 0 0 0 0 0 0 0 0 0 0
35131- 0 0 0 0 0 0 0 0 0 0 0 0
35132- 0 0 0 0 0 0 0 0 0 0 0 0
35133- 0 0 0 0 0 0 0 0 0 0 0 0
35134- 0 0 0 0 0 0 0 0 0 6 6 6
35135- 6 6 6 10 10 10 14 14 14 18 18 18
35136- 18 18 18 14 14 14 10 10 10 6 6 6
35137- 0 0 0 0 0 0 0 0 0 0 0 0
35138- 0 0 0 0 0 0 0 0 0 0 0 0
35139- 0 0 0 0 0 0 0 0 0 0 0 0
35140- 0 0 0 0 0 0 0 0 0 0 0 0
35141- 0 0 0 0 0 0 0 0 0 6 6 6
35142- 14 14 14 18 18 18 22 22 22 22 22 22
35143- 18 18 18 14 14 14 10 10 10 6 6 6
35144- 0 0 0 0 0 0 0 0 0 0 0 0
35145- 0 0 0 0 0 0 0 0 0 0 0 0
35146- 0 0 0 0 0 0 0 0 0 0 0 0
35147- 0 0 0 0 0 0 0 0 0 0 0 0
35148- 0 0 0 0 0 0 0 0 0 0 0 0
35149+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35150+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35151+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35152+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35153+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35154+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35155+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35156+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35157+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35158+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35159+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35160+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35161+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35162+4 4 4 4 4 4
35163+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35164+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35165+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35166+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35167+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35168+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35169+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35170+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35171+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35172+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35173+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35174+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35175+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35176+4 4 4 4 4 4
35177+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35178+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35179+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35180+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35181+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35182+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35183+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35184+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35185+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35186+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35187+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35188+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35189+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35190+4 4 4 4 4 4
35191+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35192+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35193+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35194+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35195+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35196+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35197+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35198+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35199+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35200+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35201+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35202+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35203+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35204+4 4 4 4 4 4
35205+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35206+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35207+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35208+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35209+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35210+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35211+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35212+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35213+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35214+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35215+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35216+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35217+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35218+4 4 4 4 4 4
35219+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35220+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35221+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35222+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35223+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35224+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35225+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35226+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35227+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35228+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35229+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35230+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35231+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35232+4 4 4 4 4 4
35233+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35234+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35235+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35236+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35237+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
35238+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
35239+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35240+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35241+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35242+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
35243+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
35244+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
35245+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35246+4 4 4 4 4 4
35247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35248+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35249+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35250+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35251+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
35252+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
35253+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35254+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35255+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35256+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
35257+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
35258+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
35259+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35260+4 4 4 4 4 4
35261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35262+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35263+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35264+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35265+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
35266+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
35267+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
35268+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35269+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35270+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
35271+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
35272+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
35273+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
35274+4 4 4 4 4 4
35275+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35276+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35277+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35278+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
35279+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
35280+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
35281+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
35282+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35283+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
35284+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
35285+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
35286+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
35287+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
35288+4 4 4 4 4 4
35289+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35290+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35291+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35292+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
35293+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
35294+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
35295+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
35296+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
35297+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
35298+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
35299+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
35300+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
35301+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
35302+4 4 4 4 4 4
35303+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35304+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35305+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
35306+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
35307+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
35308+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
35309+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
35310+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
35311+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
35312+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
35313+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
35314+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
35315+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
35316+4 4 4 4 4 4
35317+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35318+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35319+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
35320+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
35321+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
35322+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
35323+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
35324+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
35325+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
35326+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
35327+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
35328+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
35329+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
35330+4 4 4 4 4 4
35331+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35332+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35333+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
35334+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
35335+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
35336+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
35337+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
35338+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
35339+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
35340+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
35341+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
35342+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
35343+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
35344+4 4 4 4 4 4
35345+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35346+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35347+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
35348+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
35349+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
35350+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
35351+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
35352+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
35353+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
35354+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
35355+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
35356+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
35357+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
35358+4 4 4 4 4 4
35359+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35360+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35361+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
35362+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
35363+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
35364+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
35365+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
35366+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
35367+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
35368+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
35369+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
35370+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
35371+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
35372+4 4 4 4 4 4
35373+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35374+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
35375+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
35376+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
35377+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
35378+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
35379+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
35380+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
35381+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
35382+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
35383+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
35384+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
35385+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
35386+4 4 4 4 4 4
35387+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35388+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
35389+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
35390+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
35391+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
35392+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
35393+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
35394+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
35395+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
35396+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
35397+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
35398+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
35399+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
35400+0 0 0 4 4 4
35401+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
35402+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
35403+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
35404+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
35405+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
35406+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
35407+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
35408+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
35409+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
35410+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
35411+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
35412+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
35413+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
35414+2 0 0 0 0 0
35415+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
35416+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
35417+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
35418+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
35419+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
35420+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
35421+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
35422+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
35423+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
35424+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
35425+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
35426+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
35427+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
35428+37 38 37 0 0 0
35429+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
35430+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
35431+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
35432+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
35433+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
35434+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
35435+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
35436+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
35437+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
35438+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
35439+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
35440+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
35441+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
35442+85 115 134 4 0 0
35443+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
35444+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
35445+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
35446+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
35447+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
35448+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
35449+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
35450+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
35451+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
35452+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
35453+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
35454+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
35455+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
35456+60 73 81 4 0 0
35457+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
35458+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
35459+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
35460+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
35461+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
35462+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
35463+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
35464+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
35465+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
35466+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
35467+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
35468+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
35469+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
35470+16 19 21 4 0 0
35471+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
35472+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
35473+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
35474+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
35475+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
35476+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
35477+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
35478+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
35479+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
35480+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
35481+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
35482+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
35483+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
35484+4 0 0 4 3 3
35485+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
35486+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
35487+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
35488+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
35489+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
35490+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
35491+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
35492+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
35493+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
35494+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
35495+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
35496+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
35497+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
35498+3 2 2 4 4 4
35499+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
35500+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
35501+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
35502+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
35503+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
35504+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
35505+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
35506+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
35507+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
35508+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
35509+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
35510+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
35511+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
35512+4 4 4 4 4 4
35513+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
35514+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
35515+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
35516+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
35517+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
35518+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
35519+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
35520+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
35521+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
35522+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
35523+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
35524+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
35525+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
35526+4 4 4 4 4 4
35527+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
35528+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
35529+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
35530+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
35531+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
35532+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
35533+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
35534+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
35535+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
35536+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
35537+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
35538+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
35539+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
35540+5 5 5 5 5 5
35541+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
35542+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
35543+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
35544+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
35545+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
35546+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
35547+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
35548+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
35549+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
35550+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
35551+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
35552+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
35553+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
35554+5 5 5 4 4 4
35555+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
35556+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
35557+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
35558+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
35559+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
35560+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
35561+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
35562+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
35563+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
35564+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
35565+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
35566+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
35567+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35568+4 4 4 4 4 4
35569+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
35570+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
35571+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
35572+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
35573+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
35574+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
35575+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
35576+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
35577+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
35578+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
35579+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
35580+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
35581+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35582+4 4 4 4 4 4
35583+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
35584+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
35585+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
35586+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
35587+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
35588+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
35589+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
35590+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
35591+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
35592+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
35593+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
35594+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35595+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35596+4 4 4 4 4 4
35597+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
35598+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
35599+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
35600+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
35601+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
35602+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
35603+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
35604+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
35605+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
35606+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
35607+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
35608+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35609+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35610+4 4 4 4 4 4
35611+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
35612+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
35613+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
35614+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
35615+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
35616+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
35617+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
35618+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
35619+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
35620+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
35621+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35622+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35623+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35624+4 4 4 4 4 4
35625+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
35626+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
35627+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
35628+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
35629+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
35630+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
35631+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
35632+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
35633+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
35634+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
35635+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
35636+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35637+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35638+4 4 4 4 4 4
35639+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
35640+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
35641+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
35642+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
35643+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
35644+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
35645+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
35646+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
35647+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
35648+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
35649+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
35650+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35651+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35652+4 4 4 4 4 4
35653+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
35654+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
35655+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
35656+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
35657+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
35658+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
35659+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
35660+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
35661+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
35662+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
35663+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35664+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35665+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35666+4 4 4 4 4 4
35667+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
35668+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
35669+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
35670+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
35671+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
35672+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
35673+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
35674+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
35675+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
35676+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
35677+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35678+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35679+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35680+4 4 4 4 4 4
35681+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
35682+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
35683+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
35684+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
35685+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
35686+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
35687+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
35688+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
35689+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
35690+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
35691+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35692+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35693+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35694+4 4 4 4 4 4
35695+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
35696+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
35697+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
35698+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
35699+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
35700+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
35701+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
35702+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
35703+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
35704+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35705+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35706+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35707+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35708+4 4 4 4 4 4
35709+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
35710+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
35711+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
35712+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
35713+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
35714+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
35715+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
35716+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
35717+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
35718+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35719+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35720+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35721+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35722+4 4 4 4 4 4
35723+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
35724+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
35725+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
35726+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
35727+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
35728+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
35729+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
35730+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
35731+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
35732+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35733+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35734+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35735+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35736+4 4 4 4 4 4
35737+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
35738+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
35739+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
35740+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
35741+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
35742+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
35743+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
35744+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
35745+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
35746+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35747+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35748+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35749+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35750+4 4 4 4 4 4
35751+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
35752+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
35753+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
35754+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
35755+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
35756+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
35757+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
35758+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
35759+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
35760+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35761+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35762+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35763+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35764+4 4 4 4 4 4
35765+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
35766+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
35767+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
35768+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
35769+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
35770+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
35771+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
35772+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
35773+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
35774+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35775+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35776+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35777+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35778+4 4 4 4 4 4
35779+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
35780+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
35781+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
35782+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
35783+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
35784+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
35785+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
35786+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
35787+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
35788+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35789+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35790+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35791+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35792+4 4 4 4 4 4
35793+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
35794+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
35795+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
35796+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
35797+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
35798+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
35799+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
35800+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
35801+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
35802+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35803+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35804+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35805+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35806+4 4 4 4 4 4
35807+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
35808+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
35809+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
35810+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
35811+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
35812+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
35813+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
35814+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
35815+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
35816+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35817+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35818+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35819+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35820+4 4 4 4 4 4
35821+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
35822+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
35823+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
35824+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
35825+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
35826+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
35827+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
35828+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
35829+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
35830+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35831+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35832+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35833+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35834+4 4 4 4 4 4
35835+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
35836+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
35837+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
35838+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
35839+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
35840+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
35841+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
35842+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
35843+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
35844+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35845+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35846+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35847+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35848+4 4 4 4 4 4
35849+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
35850+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
35851+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
35852+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
35853+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
35854+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
35855+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
35856+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
35857+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
35858+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35859+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35860+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35861+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35862+4 4 4 4 4 4
35863+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
35864+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
35865+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
35866+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
35867+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
35868+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
35869+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
35870+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
35871+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
35872+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35873+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35874+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35875+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35876+4 4 4 4 4 4
35877+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
35878+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
35879+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
35880+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
35881+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
35882+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
35883+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
35884+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
35885+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
35886+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35887+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35888+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35889+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35890+4 4 4 4 4 4
35891+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
35892+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
35893+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
35894+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
35895+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
35896+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
35897+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
35898+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
35899+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
35900+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35901+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35902+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35903+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35904+4 4 4 4 4 4
35905+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
35906+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
35907+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
35908+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
35909+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
35910+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
35911+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
35912+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
35913+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
35914+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
35915+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35916+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35917+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35918+4 4 4 4 4 4
35919+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
35920+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
35921+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
35922+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
35923+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
35924+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
35925+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
35926+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
35927+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
35928+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
35929+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35930+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35931+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35932+4 4 4 4 4 4
35933+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
35934+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
35935+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
35936+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
35937+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
35938+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
35939+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35940+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
35941+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
35942+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
35943+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
35944+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35945+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35946+4 4 4 4 4 4
35947+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
35948+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
35949+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
35950+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
35951+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
35952+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
35953+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
35954+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
35955+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
35956+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
35957+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35958+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35959+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35960+4 4 4 4 4 4
35961+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
35962+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
35963+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
35964+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
35965+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
35966+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
35967+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
35968+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
35969+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
35970+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
35971+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35972+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35973+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35974+4 4 4 4 4 4
35975+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
35976+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
35977+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
35978+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
35979+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
35980+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
35981+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
35982+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
35983+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
35984+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
35985+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35986+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35987+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35988+4 4 4 4 4 4
35989+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
35990+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
35991+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
35992+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
35993+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
35994+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
35995+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
35996+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
35997+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
35998+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
35999+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36000+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36001+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36002+4 4 4 4 4 4
36003+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
36004+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
36005+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
36006+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
36007+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
36008+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
36009+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
36010+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
36011+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
36012+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
36013+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36014+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36015+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36016+4 4 4 4 4 4
36017+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
36018+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
36019+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
36020+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
36021+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
36022+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
36023+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
36024+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
36025+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
36026+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36027+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36028+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36029+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36030+4 4 4 4 4 4
36031+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
36032+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
36033+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
36034+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
36035+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
36036+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
36037+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
36038+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
36039+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
36040+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36041+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36042+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36043+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36044+4 4 4 4 4 4
36045+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
36046+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
36047+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
36048+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
36049+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
36050+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
36051+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
36052+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
36053+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36054+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36055+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36056+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36057+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36058+4 4 4 4 4 4
36059+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
36060+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
36061+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
36062+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
36063+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
36064+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
36065+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
36066+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
36067+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36068+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36069+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36070+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36071+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36072+4 4 4 4 4 4
36073+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
36074+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
36075+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
36076+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
36077+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
36078+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
36079+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
36080+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
36081+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36082+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36083+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36084+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36085+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36086+4 4 4 4 4 4
36087+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
36088+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
36089+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
36090+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
36091+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
36092+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
36093+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
36094+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
36095+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36096+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36097+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36098+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36099+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36100+4 4 4 4 4 4
36101+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36102+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
36103+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
36104+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
36105+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
36106+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
36107+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
36108+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
36109+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36110+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36111+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36112+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36113+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36114+4 4 4 4 4 4
36115+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36116+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
36117+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
36118+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
36119+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
36120+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
36121+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
36122+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
36123+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36124+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36125+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36126+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36127+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36128+4 4 4 4 4 4
36129+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36130+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36131+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
36132+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
36133+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
36134+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
36135+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
36136+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
36137+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36138+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36139+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36140+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36141+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36142+4 4 4 4 4 4
36143+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36144+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36145+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
36146+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
36147+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
36148+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
36149+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
36150+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36151+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36152+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36153+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36154+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36155+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36156+4 4 4 4 4 4
36157+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36158+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36159+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36160+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
36161+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
36162+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
36163+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
36164+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36165+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36166+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36167+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36168+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36169+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36170+4 4 4 4 4 4
36171+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36172+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36173+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36174+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
36175+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
36176+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
36177+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
36178+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36179+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36180+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36181+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36182+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36183+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36184+4 4 4 4 4 4
36185+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36186+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36187+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36188+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
36189+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
36190+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
36191+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
36192+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36193+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36194+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36195+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36196+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36197+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36198+4 4 4 4 4 4
36199+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36200+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36201+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36202+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
36203+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
36204+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
36205+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36206+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36207+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36208+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36209+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36210+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36211+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36212+4 4 4 4 4 4
36213+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36214+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36215+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36216+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36217+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
36218+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
36219+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
36220+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36221+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36222+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36223+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36224+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36225+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36226+4 4 4 4 4 4
36227+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36228+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36229+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36230+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36231+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
36232+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
36233+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36234+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36235+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36236+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36237+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36238+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36239+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36240+4 4 4 4 4 4
36241+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36242+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36243+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36244+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36245+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
36246+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
36247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36248+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36249+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36250+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36251+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36252+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36253+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36254+4 4 4 4 4 4
36255+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36256+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36257+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36258+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36259+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
36260+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
36261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36262+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36263+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36264+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36265+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36266+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36267+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36268+4 4 4 4 4 4
36269diff -urNp linux-3.0.4/drivers/video/udlfb.c linux-3.0.4/drivers/video/udlfb.c
36270--- linux-3.0.4/drivers/video/udlfb.c 2011-07-21 22:17:23.000000000 -0400
36271+++ linux-3.0.4/drivers/video/udlfb.c 2011-08-23 21:47:56.000000000 -0400
36272@@ -586,11 +586,11 @@ int dlfb_handle_damage(struct dlfb_data
36273 dlfb_urb_completion(urb);
36274
36275 error:
36276- atomic_add(bytes_sent, &dev->bytes_sent);
36277- atomic_add(bytes_identical, &dev->bytes_identical);
36278- atomic_add(width*height*2, &dev->bytes_rendered);
36279+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
36280+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
36281+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
36282 end_cycles = get_cycles();
36283- atomic_add(((unsigned int) ((end_cycles - start_cycles)
36284+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
36285 >> 10)), /* Kcycles */
36286 &dev->cpu_kcycles_used);
36287
36288@@ -711,11 +711,11 @@ static void dlfb_dpy_deferred_io(struct
36289 dlfb_urb_completion(urb);
36290
36291 error:
36292- atomic_add(bytes_sent, &dev->bytes_sent);
36293- atomic_add(bytes_identical, &dev->bytes_identical);
36294- atomic_add(bytes_rendered, &dev->bytes_rendered);
36295+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
36296+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
36297+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
36298 end_cycles = get_cycles();
36299- atomic_add(((unsigned int) ((end_cycles - start_cycles)
36300+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
36301 >> 10)), /* Kcycles */
36302 &dev->cpu_kcycles_used);
36303 }
36304@@ -1307,7 +1307,7 @@ static ssize_t metrics_bytes_rendered_sh
36305 struct fb_info *fb_info = dev_get_drvdata(fbdev);
36306 struct dlfb_data *dev = fb_info->par;
36307 return snprintf(buf, PAGE_SIZE, "%u\n",
36308- atomic_read(&dev->bytes_rendered));
36309+ atomic_read_unchecked(&dev->bytes_rendered));
36310 }
36311
36312 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
36313@@ -1315,7 +1315,7 @@ static ssize_t metrics_bytes_identical_s
36314 struct fb_info *fb_info = dev_get_drvdata(fbdev);
36315 struct dlfb_data *dev = fb_info->par;
36316 return snprintf(buf, PAGE_SIZE, "%u\n",
36317- atomic_read(&dev->bytes_identical));
36318+ atomic_read_unchecked(&dev->bytes_identical));
36319 }
36320
36321 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
36322@@ -1323,7 +1323,7 @@ static ssize_t metrics_bytes_sent_show(s
36323 struct fb_info *fb_info = dev_get_drvdata(fbdev);
36324 struct dlfb_data *dev = fb_info->par;
36325 return snprintf(buf, PAGE_SIZE, "%u\n",
36326- atomic_read(&dev->bytes_sent));
36327+ atomic_read_unchecked(&dev->bytes_sent));
36328 }
36329
36330 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
36331@@ -1331,7 +1331,7 @@ static ssize_t metrics_cpu_kcycles_used_
36332 struct fb_info *fb_info = dev_get_drvdata(fbdev);
36333 struct dlfb_data *dev = fb_info->par;
36334 return snprintf(buf, PAGE_SIZE, "%u\n",
36335- atomic_read(&dev->cpu_kcycles_used));
36336+ atomic_read_unchecked(&dev->cpu_kcycles_used));
36337 }
36338
36339 static ssize_t edid_show(
36340@@ -1388,10 +1388,10 @@ static ssize_t metrics_reset_store(struc
36341 struct fb_info *fb_info = dev_get_drvdata(fbdev);
36342 struct dlfb_data *dev = fb_info->par;
36343
36344- atomic_set(&dev->bytes_rendered, 0);
36345- atomic_set(&dev->bytes_identical, 0);
36346- atomic_set(&dev->bytes_sent, 0);
36347- atomic_set(&dev->cpu_kcycles_used, 0);
36348+ atomic_set_unchecked(&dev->bytes_rendered, 0);
36349+ atomic_set_unchecked(&dev->bytes_identical, 0);
36350+ atomic_set_unchecked(&dev->bytes_sent, 0);
36351+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
36352
36353 return count;
36354 }
36355diff -urNp linux-3.0.4/drivers/video/uvesafb.c linux-3.0.4/drivers/video/uvesafb.c
36356--- linux-3.0.4/drivers/video/uvesafb.c 2011-07-21 22:17:23.000000000 -0400
36357+++ linux-3.0.4/drivers/video/uvesafb.c 2011-08-23 21:47:56.000000000 -0400
36358@@ -19,6 +19,7 @@
36359 #include <linux/io.h>
36360 #include <linux/mutex.h>
36361 #include <linux/slab.h>
36362+#include <linux/moduleloader.h>
36363 #include <video/edid.h>
36364 #include <video/uvesafb.h>
36365 #ifdef CONFIG_X86
36366@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
36367 NULL,
36368 };
36369
36370- return call_usermodehelper(v86d_path, argv, envp, 1);
36371+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
36372 }
36373
36374 /*
36375@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
36376 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
36377 par->pmi_setpal = par->ypan = 0;
36378 } else {
36379+
36380+#ifdef CONFIG_PAX_KERNEXEC
36381+#ifdef CONFIG_MODULES
36382+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
36383+#endif
36384+ if (!par->pmi_code) {
36385+ par->pmi_setpal = par->ypan = 0;
36386+ return 0;
36387+ }
36388+#endif
36389+
36390 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
36391 + task->t.regs.edi);
36392+
36393+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36394+ pax_open_kernel();
36395+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
36396+ pax_close_kernel();
36397+
36398+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
36399+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
36400+#else
36401 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
36402 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
36403+#endif
36404+
36405 printk(KERN_INFO "uvesafb: protected mode interface info at "
36406 "%04x:%04x\n",
36407 (u16)task->t.regs.es, (u16)task->t.regs.edi);
36408@@ -1821,6 +1844,11 @@ out:
36409 if (par->vbe_modes)
36410 kfree(par->vbe_modes);
36411
36412+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36413+ if (par->pmi_code)
36414+ module_free_exec(NULL, par->pmi_code);
36415+#endif
36416+
36417 framebuffer_release(info);
36418 return err;
36419 }
36420@@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platfor
36421 kfree(par->vbe_state_orig);
36422 if (par->vbe_state_saved)
36423 kfree(par->vbe_state_saved);
36424+
36425+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36426+ if (par->pmi_code)
36427+ module_free_exec(NULL, par->pmi_code);
36428+#endif
36429+
36430 }
36431
36432 framebuffer_release(info);
36433diff -urNp linux-3.0.4/drivers/video/vesafb.c linux-3.0.4/drivers/video/vesafb.c
36434--- linux-3.0.4/drivers/video/vesafb.c 2011-07-21 22:17:23.000000000 -0400
36435+++ linux-3.0.4/drivers/video/vesafb.c 2011-08-23 21:47:56.000000000 -0400
36436@@ -9,6 +9,7 @@
36437 */
36438
36439 #include <linux/module.h>
36440+#include <linux/moduleloader.h>
36441 #include <linux/kernel.h>
36442 #include <linux/errno.h>
36443 #include <linux/string.h>
36444@@ -52,8 +53,8 @@ static int vram_remap __initdata; /*
36445 static int vram_total __initdata; /* Set total amount of memory */
36446 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
36447 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
36448-static void (*pmi_start)(void) __read_mostly;
36449-static void (*pmi_pal) (void) __read_mostly;
36450+static void (*pmi_start)(void) __read_only;
36451+static void (*pmi_pal) (void) __read_only;
36452 static int depth __read_mostly;
36453 static int vga_compat __read_mostly;
36454 /* --------------------------------------------------------------------- */
36455@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
36456 unsigned int size_vmode;
36457 unsigned int size_remap;
36458 unsigned int size_total;
36459+ void *pmi_code = NULL;
36460
36461 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
36462 return -ENODEV;
36463@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
36464 size_remap = size_total;
36465 vesafb_fix.smem_len = size_remap;
36466
36467-#ifndef __i386__
36468- screen_info.vesapm_seg = 0;
36469-#endif
36470-
36471 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
36472 printk(KERN_WARNING
36473 "vesafb: cannot reserve video memory at 0x%lx\n",
36474@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct pl
36475 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
36476 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
36477
36478+#ifdef __i386__
36479+
36480+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36481+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
36482+ if (!pmi_code)
36483+#elif !defined(CONFIG_PAX_KERNEXEC)
36484+ if (0)
36485+#endif
36486+
36487+#endif
36488+ screen_info.vesapm_seg = 0;
36489+
36490 if (screen_info.vesapm_seg) {
36491- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
36492- screen_info.vesapm_seg,screen_info.vesapm_off);
36493+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
36494+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
36495 }
36496
36497 if (screen_info.vesapm_seg < 0xc000)
36498@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct pl
36499
36500 if (ypan || pmi_setpal) {
36501 unsigned short *pmi_base;
36502+
36503 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
36504- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
36505- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
36506+
36507+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36508+ pax_open_kernel();
36509+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
36510+#else
36511+ pmi_code = pmi_base;
36512+#endif
36513+
36514+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
36515+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
36516+
36517+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36518+ pmi_start = ktva_ktla(pmi_start);
36519+ pmi_pal = ktva_ktla(pmi_pal);
36520+ pax_close_kernel();
36521+#endif
36522+
36523 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
36524 if (pmi_base[3]) {
36525 printk(KERN_INFO "vesafb: pmi: ports = ");
36526@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct pl
36527 info->node, info->fix.id);
36528 return 0;
36529 err:
36530+
36531+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36532+ module_free_exec(NULL, pmi_code);
36533+#endif
36534+
36535 if (info->screen_base)
36536 iounmap(info->screen_base);
36537 framebuffer_release(info);
36538diff -urNp linux-3.0.4/drivers/video/via/via_clock.h linux-3.0.4/drivers/video/via/via_clock.h
36539--- linux-3.0.4/drivers/video/via/via_clock.h 2011-07-21 22:17:23.000000000 -0400
36540+++ linux-3.0.4/drivers/video/via/via_clock.h 2011-08-23 21:47:56.000000000 -0400
36541@@ -56,7 +56,7 @@ struct via_clock {
36542
36543 void (*set_engine_pll_state)(u8 state);
36544 void (*set_engine_pll)(struct via_pll_config config);
36545-};
36546+} __no_const;
36547
36548
36549 static inline u32 get_pll_internal_frequency(u32 ref_freq,
36550diff -urNp linux-3.0.4/drivers/virtio/virtio_balloon.c linux-3.0.4/drivers/virtio/virtio_balloon.c
36551--- linux-3.0.4/drivers/virtio/virtio_balloon.c 2011-07-21 22:17:23.000000000 -0400
36552+++ linux-3.0.4/drivers/virtio/virtio_balloon.c 2011-08-23 21:48:14.000000000 -0400
36553@@ -174,6 +174,8 @@ static void update_balloon_stats(struct
36554 struct sysinfo i;
36555 int idx = 0;
36556
36557+ pax_track_stack();
36558+
36559 all_vm_events(events);
36560 si_meminfo(&i);
36561
36562diff -urNp linux-3.0.4/fs/9p/vfs_inode.c linux-3.0.4/fs/9p/vfs_inode.c
36563--- linux-3.0.4/fs/9p/vfs_inode.c 2011-07-21 22:17:23.000000000 -0400
36564+++ linux-3.0.4/fs/9p/vfs_inode.c 2011-08-23 21:47:56.000000000 -0400
36565@@ -1210,7 +1210,7 @@ static void *v9fs_vfs_follow_link(struct
36566 void
36567 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
36568 {
36569- char *s = nd_get_link(nd);
36570+ const char *s = nd_get_link(nd);
36571
36572 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
36573 IS_ERR(s) ? "<error>" : s);
36574diff -urNp linux-3.0.4/fs/aio.c linux-3.0.4/fs/aio.c
36575--- linux-3.0.4/fs/aio.c 2011-07-21 22:17:23.000000000 -0400
36576+++ linux-3.0.4/fs/aio.c 2011-08-23 21:48:14.000000000 -0400
36577@@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx
36578 size += sizeof(struct io_event) * nr_events;
36579 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
36580
36581- if (nr_pages < 0)
36582+ if (nr_pages <= 0)
36583 return -EINVAL;
36584
36585 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
36586@@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ct
36587 struct aio_timeout to;
36588 int retry = 0;
36589
36590+ pax_track_stack();
36591+
36592 /* needed to zero any padding within an entry (there shouldn't be
36593 * any, but C is fun!
36594 */
36595@@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *i
36596 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
36597 {
36598 ssize_t ret;
36599+ struct iovec iovstack;
36600
36601 #ifdef CONFIG_COMPAT
36602 if (compat)
36603 ret = compat_rw_copy_check_uvector(type,
36604 (struct compat_iovec __user *)kiocb->ki_buf,
36605- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
36606+ kiocb->ki_nbytes, 1, &iovstack,
36607 &kiocb->ki_iovec);
36608 else
36609 #endif
36610 ret = rw_copy_check_uvector(type,
36611 (struct iovec __user *)kiocb->ki_buf,
36612- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
36613+ kiocb->ki_nbytes, 1, &iovstack,
36614 &kiocb->ki_iovec);
36615 if (ret < 0)
36616 goto out;
36617
36618+ if (kiocb->ki_iovec == &iovstack) {
36619+ kiocb->ki_inline_vec = iovstack;
36620+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
36621+ }
36622 kiocb->ki_nr_segs = kiocb->ki_nbytes;
36623 kiocb->ki_cur_seg = 0;
36624 /* ki_nbytes/left now reflect bytes instead of segs */
36625diff -urNp linux-3.0.4/fs/attr.c linux-3.0.4/fs/attr.c
36626--- linux-3.0.4/fs/attr.c 2011-07-21 22:17:23.000000000 -0400
36627+++ linux-3.0.4/fs/attr.c 2011-08-23 21:48:14.000000000 -0400
36628@@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode
36629 unsigned long limit;
36630
36631 limit = rlimit(RLIMIT_FSIZE);
36632+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
36633 if (limit != RLIM_INFINITY && offset > limit)
36634 goto out_sig;
36635 if (offset > inode->i_sb->s_maxbytes)
36636diff -urNp linux-3.0.4/fs/befs/linuxvfs.c linux-3.0.4/fs/befs/linuxvfs.c
36637--- linux-3.0.4/fs/befs/linuxvfs.c 2011-08-29 23:26:13.000000000 -0400
36638+++ linux-3.0.4/fs/befs/linuxvfs.c 2011-08-29 23:26:27.000000000 -0400
36639@@ -503,7 +503,7 @@ static void befs_put_link(struct dentry
36640 {
36641 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
36642 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
36643- char *link = nd_get_link(nd);
36644+ const char *link = nd_get_link(nd);
36645 if (!IS_ERR(link))
36646 kfree(link);
36647 }
36648diff -urNp linux-3.0.4/fs/binfmt_aout.c linux-3.0.4/fs/binfmt_aout.c
36649--- linux-3.0.4/fs/binfmt_aout.c 2011-07-21 22:17:23.000000000 -0400
36650+++ linux-3.0.4/fs/binfmt_aout.c 2011-08-23 21:48:14.000000000 -0400
36651@@ -16,6 +16,7 @@
36652 #include <linux/string.h>
36653 #include <linux/fs.h>
36654 #include <linux/file.h>
36655+#include <linux/security.h>
36656 #include <linux/stat.h>
36657 #include <linux/fcntl.h>
36658 #include <linux/ptrace.h>
36659@@ -86,6 +87,8 @@ static int aout_core_dump(struct coredum
36660 #endif
36661 # define START_STACK(u) ((void __user *)u.start_stack)
36662
36663+ memset(&dump, 0, sizeof(dump));
36664+
36665 fs = get_fs();
36666 set_fs(KERNEL_DS);
36667 has_dumped = 1;
36668@@ -97,10 +100,12 @@ static int aout_core_dump(struct coredum
36669
36670 /* If the size of the dump file exceeds the rlimit, then see what would happen
36671 if we wrote the stack, but not the data area. */
36672+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
36673 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
36674 dump.u_dsize = 0;
36675
36676 /* Make sure we have enough room to write the stack and data areas. */
36677+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
36678 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
36679 dump.u_ssize = 0;
36680
36681@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux
36682 rlim = rlimit(RLIMIT_DATA);
36683 if (rlim >= RLIM_INFINITY)
36684 rlim = ~0;
36685+
36686+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
36687 if (ex.a_data + ex.a_bss > rlim)
36688 return -ENOMEM;
36689
36690@@ -262,6 +269,27 @@ static int load_aout_binary(struct linux
36691 install_exec_creds(bprm);
36692 current->flags &= ~PF_FORKNOEXEC;
36693
36694+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
36695+ current->mm->pax_flags = 0UL;
36696+#endif
36697+
36698+#ifdef CONFIG_PAX_PAGEEXEC
36699+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
36700+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
36701+
36702+#ifdef CONFIG_PAX_EMUTRAMP
36703+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
36704+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
36705+#endif
36706+
36707+#ifdef CONFIG_PAX_MPROTECT
36708+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
36709+ current->mm->pax_flags |= MF_PAX_MPROTECT;
36710+#endif
36711+
36712+ }
36713+#endif
36714+
36715 if (N_MAGIC(ex) == OMAGIC) {
36716 unsigned long text_addr, map_size;
36717 loff_t pos;
36718@@ -334,7 +362,7 @@ static int load_aout_binary(struct linux
36719
36720 down_write(&current->mm->mmap_sem);
36721 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
36722- PROT_READ | PROT_WRITE | PROT_EXEC,
36723+ PROT_READ | PROT_WRITE,
36724 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
36725 fd_offset + ex.a_text);
36726 up_write(&current->mm->mmap_sem);
36727diff -urNp linux-3.0.4/fs/binfmt_elf.c linux-3.0.4/fs/binfmt_elf.c
36728--- linux-3.0.4/fs/binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
36729+++ linux-3.0.4/fs/binfmt_elf.c 2011-08-23 21:48:14.000000000 -0400
36730@@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump
36731 #define elf_core_dump NULL
36732 #endif
36733
36734+#ifdef CONFIG_PAX_MPROTECT
36735+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
36736+#endif
36737+
36738 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
36739 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
36740 #else
36741@@ -70,6 +74,11 @@ static struct linux_binfmt elf_format =
36742 .load_binary = load_elf_binary,
36743 .load_shlib = load_elf_library,
36744 .core_dump = elf_core_dump,
36745+
36746+#ifdef CONFIG_PAX_MPROTECT
36747+ .handle_mprotect= elf_handle_mprotect,
36748+#endif
36749+
36750 .min_coredump = ELF_EXEC_PAGESIZE,
36751 };
36752
36753@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
36754
36755 static int set_brk(unsigned long start, unsigned long end)
36756 {
36757+ unsigned long e = end;
36758+
36759 start = ELF_PAGEALIGN(start);
36760 end = ELF_PAGEALIGN(end);
36761 if (end > start) {
36762@@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
36763 if (BAD_ADDR(addr))
36764 return addr;
36765 }
36766- current->mm->start_brk = current->mm->brk = end;
36767+ current->mm->start_brk = current->mm->brk = e;
36768 return 0;
36769 }
36770
36771@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
36772 elf_addr_t __user *u_rand_bytes;
36773 const char *k_platform = ELF_PLATFORM;
36774 const char *k_base_platform = ELF_BASE_PLATFORM;
36775- unsigned char k_rand_bytes[16];
36776+ u32 k_rand_bytes[4];
36777 int items;
36778 elf_addr_t *elf_info;
36779 int ei_index = 0;
36780 const struct cred *cred = current_cred();
36781 struct vm_area_struct *vma;
36782+ unsigned long saved_auxv[AT_VECTOR_SIZE];
36783+
36784+ pax_track_stack();
36785
36786 /*
36787 * In some cases (e.g. Hyper-Threading), we want to avoid L1
36788@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
36789 * Generate 16 random bytes for userspace PRNG seeding.
36790 */
36791 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
36792- u_rand_bytes = (elf_addr_t __user *)
36793- STACK_ALLOC(p, sizeof(k_rand_bytes));
36794+ srandom32(k_rand_bytes[0] ^ random32());
36795+ srandom32(k_rand_bytes[1] ^ random32());
36796+ srandom32(k_rand_bytes[2] ^ random32());
36797+ srandom32(k_rand_bytes[3] ^ random32());
36798+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
36799+ u_rand_bytes = (elf_addr_t __user *) p;
36800 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
36801 return -EFAULT;
36802
36803@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
36804 return -EFAULT;
36805 current->mm->env_end = p;
36806
36807+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
36808+
36809 /* Put the elf_info on the stack in the right place. */
36810 sp = (elf_addr_t __user *)envp + 1;
36811- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
36812+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
36813 return -EFAULT;
36814 return 0;
36815 }
36816@@ -381,10 +401,10 @@ static unsigned long load_elf_interp(str
36817 {
36818 struct elf_phdr *elf_phdata;
36819 struct elf_phdr *eppnt;
36820- unsigned long load_addr = 0;
36821+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
36822 int load_addr_set = 0;
36823 unsigned long last_bss = 0, elf_bss = 0;
36824- unsigned long error = ~0UL;
36825+ unsigned long error = -EINVAL;
36826 unsigned long total_size;
36827 int retval, i, size;
36828
36829@@ -430,6 +450,11 @@ static unsigned long load_elf_interp(str
36830 goto out_close;
36831 }
36832
36833+#ifdef CONFIG_PAX_SEGMEXEC
36834+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
36835+ pax_task_size = SEGMEXEC_TASK_SIZE;
36836+#endif
36837+
36838 eppnt = elf_phdata;
36839 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
36840 if (eppnt->p_type == PT_LOAD) {
36841@@ -473,8 +498,8 @@ static unsigned long load_elf_interp(str
36842 k = load_addr + eppnt->p_vaddr;
36843 if (BAD_ADDR(k) ||
36844 eppnt->p_filesz > eppnt->p_memsz ||
36845- eppnt->p_memsz > TASK_SIZE ||
36846- TASK_SIZE - eppnt->p_memsz < k) {
36847+ eppnt->p_memsz > pax_task_size ||
36848+ pax_task_size - eppnt->p_memsz < k) {
36849 error = -ENOMEM;
36850 goto out_close;
36851 }
36852@@ -528,6 +553,193 @@ out:
36853 return error;
36854 }
36855
36856+#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
36857+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
36858+{
36859+ unsigned long pax_flags = 0UL;
36860+
36861+#ifdef CONFIG_PAX_PAGEEXEC
36862+ if (elf_phdata->p_flags & PF_PAGEEXEC)
36863+ pax_flags |= MF_PAX_PAGEEXEC;
36864+#endif
36865+
36866+#ifdef CONFIG_PAX_SEGMEXEC
36867+ if (elf_phdata->p_flags & PF_SEGMEXEC)
36868+ pax_flags |= MF_PAX_SEGMEXEC;
36869+#endif
36870+
36871+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36872+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36873+ if ((__supported_pte_mask & _PAGE_NX))
36874+ pax_flags &= ~MF_PAX_SEGMEXEC;
36875+ else
36876+ pax_flags &= ~MF_PAX_PAGEEXEC;
36877+ }
36878+#endif
36879+
36880+#ifdef CONFIG_PAX_EMUTRAMP
36881+ if (elf_phdata->p_flags & PF_EMUTRAMP)
36882+ pax_flags |= MF_PAX_EMUTRAMP;
36883+#endif
36884+
36885+#ifdef CONFIG_PAX_MPROTECT
36886+ if (elf_phdata->p_flags & PF_MPROTECT)
36887+ pax_flags |= MF_PAX_MPROTECT;
36888+#endif
36889+
36890+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
36891+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
36892+ pax_flags |= MF_PAX_RANDMMAP;
36893+#endif
36894+
36895+ return pax_flags;
36896+}
36897+#endif
36898+
36899+#ifdef CONFIG_PAX_PT_PAX_FLAGS
36900+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
36901+{
36902+ unsigned long pax_flags = 0UL;
36903+
36904+#ifdef CONFIG_PAX_PAGEEXEC
36905+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
36906+ pax_flags |= MF_PAX_PAGEEXEC;
36907+#endif
36908+
36909+#ifdef CONFIG_PAX_SEGMEXEC
36910+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
36911+ pax_flags |= MF_PAX_SEGMEXEC;
36912+#endif
36913+
36914+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36915+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36916+ if ((__supported_pte_mask & _PAGE_NX))
36917+ pax_flags &= ~MF_PAX_SEGMEXEC;
36918+ else
36919+ pax_flags &= ~MF_PAX_PAGEEXEC;
36920+ }
36921+#endif
36922+
36923+#ifdef CONFIG_PAX_EMUTRAMP
36924+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
36925+ pax_flags |= MF_PAX_EMUTRAMP;
36926+#endif
36927+
36928+#ifdef CONFIG_PAX_MPROTECT
36929+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
36930+ pax_flags |= MF_PAX_MPROTECT;
36931+#endif
36932+
36933+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
36934+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
36935+ pax_flags |= MF_PAX_RANDMMAP;
36936+#endif
36937+
36938+ return pax_flags;
36939+}
36940+#endif
36941+
36942+#ifdef CONFIG_PAX_EI_PAX
36943+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
36944+{
36945+ unsigned long pax_flags = 0UL;
36946+
36947+#ifdef CONFIG_PAX_PAGEEXEC
36948+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
36949+ pax_flags |= MF_PAX_PAGEEXEC;
36950+#endif
36951+
36952+#ifdef CONFIG_PAX_SEGMEXEC
36953+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
36954+ pax_flags |= MF_PAX_SEGMEXEC;
36955+#endif
36956+
36957+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36958+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36959+ if ((__supported_pte_mask & _PAGE_NX))
36960+ pax_flags &= ~MF_PAX_SEGMEXEC;
36961+ else
36962+ pax_flags &= ~MF_PAX_PAGEEXEC;
36963+ }
36964+#endif
36965+
36966+#ifdef CONFIG_PAX_EMUTRAMP
36967+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
36968+ pax_flags |= MF_PAX_EMUTRAMP;
36969+#endif
36970+
36971+#ifdef CONFIG_PAX_MPROTECT
36972+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
36973+ pax_flags |= MF_PAX_MPROTECT;
36974+#endif
36975+
36976+#ifdef CONFIG_PAX_ASLR
36977+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
36978+ pax_flags |= MF_PAX_RANDMMAP;
36979+#endif
36980+
36981+ return pax_flags;
36982+}
36983+#endif
36984+
36985+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
36986+static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
36987+{
36988+ unsigned long pax_flags = 0UL;
36989+
36990+#ifdef CONFIG_PAX_PT_PAX_FLAGS
36991+ unsigned long i;
36992+ int found_flags = 0;
36993+#endif
36994+
36995+#ifdef CONFIG_PAX_EI_PAX
36996+ pax_flags = pax_parse_ei_pax(elf_ex);
36997+#endif
36998+
36999+#ifdef CONFIG_PAX_PT_PAX_FLAGS
37000+ for (i = 0UL; i < elf_ex->e_phnum; i++)
37001+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
37002+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
37003+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
37004+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
37005+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
37006+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
37007+ return -EINVAL;
37008+
37009+#ifdef CONFIG_PAX_SOFTMODE
37010+ if (pax_softmode)
37011+ pax_flags = pax_parse_softmode(&elf_phdata[i]);
37012+ else
37013+#endif
37014+
37015+ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
37016+ found_flags = 1;
37017+ break;
37018+ }
37019+#endif
37020+
37021+#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
37022+ if (found_flags == 0) {
37023+ struct elf_phdr phdr;
37024+ memset(&phdr, 0, sizeof(phdr));
37025+ phdr.p_flags = PF_NOEMUTRAMP;
37026+#ifdef CONFIG_PAX_SOFTMODE
37027+ if (pax_softmode)
37028+ pax_flags = pax_parse_softmode(&phdr);
37029+ else
37030+#endif
37031+ pax_flags = pax_parse_hardmode(&phdr);
37032+ }
37033+#endif
37034+
37035+ if (0 > pax_check_flags(&pax_flags))
37036+ return -EINVAL;
37037+
37038+ current->mm->pax_flags = pax_flags;
37039+ return 0;
37040+}
37041+#endif
37042+
37043 /*
37044 * These are the functions used to load ELF style executables and shared
37045 * libraries. There is no binary dependent code anywhere else.
37046@@ -544,6 +756,11 @@ static unsigned long randomize_stack_top
37047 {
37048 unsigned int random_variable = 0;
37049
37050+#ifdef CONFIG_PAX_RANDUSTACK
37051+ if (randomize_va_space)
37052+ return stack_top - current->mm->delta_stack;
37053+#endif
37054+
37055 if ((current->flags & PF_RANDOMIZE) &&
37056 !(current->personality & ADDR_NO_RANDOMIZE)) {
37057 random_variable = get_random_int() & STACK_RND_MASK;
37058@@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_
37059 unsigned long load_addr = 0, load_bias = 0;
37060 int load_addr_set = 0;
37061 char * elf_interpreter = NULL;
37062- unsigned long error;
37063+ unsigned long error = 0;
37064 struct elf_phdr *elf_ppnt, *elf_phdata;
37065 unsigned long elf_bss, elf_brk;
37066 int retval, i;
37067@@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_
37068 unsigned long start_code, end_code, start_data, end_data;
37069 unsigned long reloc_func_desc __maybe_unused = 0;
37070 int executable_stack = EXSTACK_DEFAULT;
37071- unsigned long def_flags = 0;
37072 struct {
37073 struct elfhdr elf_ex;
37074 struct elfhdr interp_elf_ex;
37075 } *loc;
37076+ unsigned long pax_task_size = TASK_SIZE;
37077
37078 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
37079 if (!loc) {
37080@@ -714,11 +931,81 @@ static int load_elf_binary(struct linux_
37081
37082 /* OK, This is the point of no return */
37083 current->flags &= ~PF_FORKNOEXEC;
37084- current->mm->def_flags = def_flags;
37085+
37086+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
37087+ current->mm->pax_flags = 0UL;
37088+#endif
37089+
37090+#ifdef CONFIG_PAX_DLRESOLVE
37091+ current->mm->call_dl_resolve = 0UL;
37092+#endif
37093+
37094+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
37095+ current->mm->call_syscall = 0UL;
37096+#endif
37097+
37098+#ifdef CONFIG_PAX_ASLR
37099+ current->mm->delta_mmap = 0UL;
37100+ current->mm->delta_stack = 0UL;
37101+#endif
37102+
37103+ current->mm->def_flags = 0;
37104+
37105+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
37106+ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
37107+ send_sig(SIGKILL, current, 0);
37108+ goto out_free_dentry;
37109+ }
37110+#endif
37111+
37112+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
37113+ pax_set_initial_flags(bprm);
37114+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
37115+ if (pax_set_initial_flags_func)
37116+ (pax_set_initial_flags_func)(bprm);
37117+#endif
37118+
37119+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
37120+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
37121+ current->mm->context.user_cs_limit = PAGE_SIZE;
37122+ current->mm->def_flags |= VM_PAGEEXEC;
37123+ }
37124+#endif
37125+
37126+#ifdef CONFIG_PAX_SEGMEXEC
37127+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
37128+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
37129+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
37130+ pax_task_size = SEGMEXEC_TASK_SIZE;
37131+ current->mm->def_flags |= VM_NOHUGEPAGE;
37132+ }
37133+#endif
37134+
37135+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
37136+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
37137+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
37138+ put_cpu();
37139+ }
37140+#endif
37141
37142 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
37143 may depend on the personality. */
37144 SET_PERSONALITY(loc->elf_ex);
37145+
37146+#ifdef CONFIG_PAX_ASLR
37147+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
37148+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
37149+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
37150+ }
37151+#endif
37152+
37153+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
37154+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
37155+ executable_stack = EXSTACK_DISABLE_X;
37156+ current->personality &= ~READ_IMPLIES_EXEC;
37157+ } else
37158+#endif
37159+
37160 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
37161 current->personality |= READ_IMPLIES_EXEC;
37162
37163@@ -800,6 +1087,20 @@ static int load_elf_binary(struct linux_
37164 #else
37165 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
37166 #endif
37167+
37168+#ifdef CONFIG_PAX_RANDMMAP
37169+ /* PaX: randomize base address at the default exe base if requested */
37170+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
37171+#ifdef CONFIG_SPARC64
37172+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
37173+#else
37174+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
37175+#endif
37176+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
37177+ elf_flags |= MAP_FIXED;
37178+ }
37179+#endif
37180+
37181 }
37182
37183 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
37184@@ -832,9 +1133,9 @@ static int load_elf_binary(struct linux_
37185 * allowed task size. Note that p_filesz must always be
37186 * <= p_memsz so it is only necessary to check p_memsz.
37187 */
37188- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
37189- elf_ppnt->p_memsz > TASK_SIZE ||
37190- TASK_SIZE - elf_ppnt->p_memsz < k) {
37191+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
37192+ elf_ppnt->p_memsz > pax_task_size ||
37193+ pax_task_size - elf_ppnt->p_memsz < k) {
37194 /* set_brk can never work. Avoid overflows. */
37195 send_sig(SIGKILL, current, 0);
37196 retval = -EINVAL;
37197@@ -862,6 +1163,11 @@ static int load_elf_binary(struct linux_
37198 start_data += load_bias;
37199 end_data += load_bias;
37200
37201+#ifdef CONFIG_PAX_RANDMMAP
37202+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
37203+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
37204+#endif
37205+
37206 /* Calling set_brk effectively mmaps the pages that we need
37207 * for the bss and break sections. We must do this before
37208 * mapping in the interpreter, to make sure it doesn't wind
37209@@ -873,9 +1179,11 @@ static int load_elf_binary(struct linux_
37210 goto out_free_dentry;
37211 }
37212 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
37213- send_sig(SIGSEGV, current, 0);
37214- retval = -EFAULT; /* Nobody gets to see this, but.. */
37215- goto out_free_dentry;
37216+ /*
37217+ * This bss-zeroing can fail if the ELF
37218+ * file specifies odd protections. So
37219+ * we don't check the return value
37220+ */
37221 }
37222
37223 if (elf_interpreter) {
37224@@ -1090,7 +1398,7 @@ out:
37225 * Decide what to dump of a segment, part, all or none.
37226 */
37227 static unsigned long vma_dump_size(struct vm_area_struct *vma,
37228- unsigned long mm_flags)
37229+ unsigned long mm_flags, long signr)
37230 {
37231 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
37232
37233@@ -1124,7 +1432,7 @@ static unsigned long vma_dump_size(struc
37234 if (vma->vm_file == NULL)
37235 return 0;
37236
37237- if (FILTER(MAPPED_PRIVATE))
37238+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
37239 goto whole;
37240
37241 /*
37242@@ -1346,9 +1654,9 @@ static void fill_auxv_note(struct memelf
37243 {
37244 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
37245 int i = 0;
37246- do
37247+ do {
37248 i += 2;
37249- while (auxv[i - 2] != AT_NULL);
37250+ } while (auxv[i - 2] != AT_NULL);
37251 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
37252 }
37253
37254@@ -1854,14 +2162,14 @@ static void fill_extnum_info(struct elfh
37255 }
37256
37257 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
37258- unsigned long mm_flags)
37259+ struct coredump_params *cprm)
37260 {
37261 struct vm_area_struct *vma;
37262 size_t size = 0;
37263
37264 for (vma = first_vma(current, gate_vma); vma != NULL;
37265 vma = next_vma(vma, gate_vma))
37266- size += vma_dump_size(vma, mm_flags);
37267+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
37268 return size;
37269 }
37270
37271@@ -1955,7 +2263,7 @@ static int elf_core_dump(struct coredump
37272
37273 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
37274
37275- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
37276+ offset += elf_core_vma_data_size(gate_vma, cprm);
37277 offset += elf_core_extra_data_size();
37278 e_shoff = offset;
37279
37280@@ -1969,10 +2277,12 @@ static int elf_core_dump(struct coredump
37281 offset = dataoff;
37282
37283 size += sizeof(*elf);
37284+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
37285 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
37286 goto end_coredump;
37287
37288 size += sizeof(*phdr4note);
37289+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
37290 if (size > cprm->limit
37291 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
37292 goto end_coredump;
37293@@ -1986,7 +2296,7 @@ static int elf_core_dump(struct coredump
37294 phdr.p_offset = offset;
37295 phdr.p_vaddr = vma->vm_start;
37296 phdr.p_paddr = 0;
37297- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
37298+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
37299 phdr.p_memsz = vma->vm_end - vma->vm_start;
37300 offset += phdr.p_filesz;
37301 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
37302@@ -1997,6 +2307,7 @@ static int elf_core_dump(struct coredump
37303 phdr.p_align = ELF_EXEC_PAGESIZE;
37304
37305 size += sizeof(phdr);
37306+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
37307 if (size > cprm->limit
37308 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
37309 goto end_coredump;
37310@@ -2021,7 +2332,7 @@ static int elf_core_dump(struct coredump
37311 unsigned long addr;
37312 unsigned long end;
37313
37314- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
37315+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
37316
37317 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
37318 struct page *page;
37319@@ -2030,6 +2341,7 @@ static int elf_core_dump(struct coredump
37320 page = get_dump_page(addr);
37321 if (page) {
37322 void *kaddr = kmap(page);
37323+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
37324 stop = ((size += PAGE_SIZE) > cprm->limit) ||
37325 !dump_write(cprm->file, kaddr,
37326 PAGE_SIZE);
37327@@ -2047,6 +2359,7 @@ static int elf_core_dump(struct coredump
37328
37329 if (e_phnum == PN_XNUM) {
37330 size += sizeof(*shdr4extnum);
37331+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
37332 if (size > cprm->limit
37333 || !dump_write(cprm->file, shdr4extnum,
37334 sizeof(*shdr4extnum)))
37335@@ -2067,6 +2380,97 @@ out:
37336
37337 #endif /* CONFIG_ELF_CORE */
37338
37339+#ifdef CONFIG_PAX_MPROTECT
37340+/* PaX: non-PIC ELF libraries need relocations on their executable segments
37341+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
37342+ * we'll remove VM_MAYWRITE for good on RELRO segments.
37343+ *
37344+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
37345+ * basis because we want to allow the common case and not the special ones.
37346+ */
37347+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
37348+{
37349+ struct elfhdr elf_h;
37350+ struct elf_phdr elf_p;
37351+ unsigned long i;
37352+ unsigned long oldflags;
37353+ bool is_textrel_rw, is_textrel_rx, is_relro;
37354+
37355+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
37356+ return;
37357+
37358+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
37359+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
37360+
37361+#ifdef CONFIG_PAX_ELFRELOCS
37362+ /* possible TEXTREL */
37363+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
37364+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
37365+#else
37366+ is_textrel_rw = false;
37367+ is_textrel_rx = false;
37368+#endif
37369+
37370+ /* possible RELRO */
37371+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
37372+
37373+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
37374+ return;
37375+
37376+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
37377+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
37378+
37379+#ifdef CONFIG_PAX_ETEXECRELOCS
37380+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
37381+#else
37382+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
37383+#endif
37384+
37385+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
37386+ !elf_check_arch(&elf_h) ||
37387+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
37388+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
37389+ return;
37390+
37391+ for (i = 0UL; i < elf_h.e_phnum; i++) {
37392+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
37393+ return;
37394+ switch (elf_p.p_type) {
37395+ case PT_DYNAMIC:
37396+ if (!is_textrel_rw && !is_textrel_rx)
37397+ continue;
37398+ i = 0UL;
37399+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
37400+ elf_dyn dyn;
37401+
37402+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
37403+ return;
37404+ if (dyn.d_tag == DT_NULL)
37405+ return;
37406+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
37407+ gr_log_textrel(vma);
37408+ if (is_textrel_rw)
37409+ vma->vm_flags |= VM_MAYWRITE;
37410+ else
37411+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
37412+ vma->vm_flags &= ~VM_MAYWRITE;
37413+ return;
37414+ }
37415+ i++;
37416+ }
37417+ return;
37418+
37419+ case PT_GNU_RELRO:
37420+ if (!is_relro)
37421+ continue;
37422+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
37423+ vma->vm_flags &= ~VM_MAYWRITE;
37424+ return;
37425+ }
37426+ }
37427+}
37428+#endif
37429+
37430 static int __init init_elf_binfmt(void)
37431 {
37432 return register_binfmt(&elf_format);
37433diff -urNp linux-3.0.4/fs/binfmt_flat.c linux-3.0.4/fs/binfmt_flat.c
37434--- linux-3.0.4/fs/binfmt_flat.c 2011-07-21 22:17:23.000000000 -0400
37435+++ linux-3.0.4/fs/binfmt_flat.c 2011-08-23 21:47:56.000000000 -0400
37436@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_b
37437 realdatastart = (unsigned long) -ENOMEM;
37438 printk("Unable to allocate RAM for process data, errno %d\n",
37439 (int)-realdatastart);
37440+ down_write(&current->mm->mmap_sem);
37441 do_munmap(current->mm, textpos, text_len);
37442+ up_write(&current->mm->mmap_sem);
37443 ret = realdatastart;
37444 goto err;
37445 }
37446@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_b
37447 }
37448 if (IS_ERR_VALUE(result)) {
37449 printk("Unable to read data+bss, errno %d\n", (int)-result);
37450+ down_write(&current->mm->mmap_sem);
37451 do_munmap(current->mm, textpos, text_len);
37452 do_munmap(current->mm, realdatastart, len);
37453+ up_write(&current->mm->mmap_sem);
37454 ret = result;
37455 goto err;
37456 }
37457@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_b
37458 }
37459 if (IS_ERR_VALUE(result)) {
37460 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
37461+ down_write(&current->mm->mmap_sem);
37462 do_munmap(current->mm, textpos, text_len + data_len + extra +
37463 MAX_SHARED_LIBS * sizeof(unsigned long));
37464+ up_write(&current->mm->mmap_sem);
37465 ret = result;
37466 goto err;
37467 }
37468diff -urNp linux-3.0.4/fs/bio.c linux-3.0.4/fs/bio.c
37469--- linux-3.0.4/fs/bio.c 2011-07-21 22:17:23.000000000 -0400
37470+++ linux-3.0.4/fs/bio.c 2011-08-23 21:47:56.000000000 -0400
37471@@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct b
37472 const int read = bio_data_dir(bio) == READ;
37473 struct bio_map_data *bmd = bio->bi_private;
37474 int i;
37475- char *p = bmd->sgvecs[0].iov_base;
37476+ char *p = (__force char *)bmd->sgvecs[0].iov_base;
37477
37478 __bio_for_each_segment(bvec, bio, i, 0) {
37479 char *addr = page_address(bvec->bv_page);
37480diff -urNp linux-3.0.4/fs/block_dev.c linux-3.0.4/fs/block_dev.c
37481--- linux-3.0.4/fs/block_dev.c 2011-07-21 22:17:23.000000000 -0400
37482+++ linux-3.0.4/fs/block_dev.c 2011-08-23 21:47:56.000000000 -0400
37483@@ -671,7 +671,7 @@ static bool bd_may_claim(struct block_de
37484 else if (bdev->bd_contains == bdev)
37485 return true; /* is a whole device which isn't held */
37486
37487- else if (whole->bd_holder == bd_may_claim)
37488+ else if (whole->bd_holder == (void *)bd_may_claim)
37489 return true; /* is a partition of a device that is being partitioned */
37490 else if (whole->bd_holder != NULL)
37491 return false; /* is a partition of a held device */
37492diff -urNp linux-3.0.4/fs/btrfs/ctree.c linux-3.0.4/fs/btrfs/ctree.c
37493--- linux-3.0.4/fs/btrfs/ctree.c 2011-07-21 22:17:23.000000000 -0400
37494+++ linux-3.0.4/fs/btrfs/ctree.c 2011-08-23 21:47:56.000000000 -0400
37495@@ -454,9 +454,12 @@ static noinline int __btrfs_cow_block(st
37496 free_extent_buffer(buf);
37497 add_root_to_dirty_list(root);
37498 } else {
37499- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
37500- parent_start = parent->start;
37501- else
37502+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
37503+ if (parent)
37504+ parent_start = parent->start;
37505+ else
37506+ parent_start = 0;
37507+ } else
37508 parent_start = 0;
37509
37510 WARN_ON(trans->transid != btrfs_header_generation(parent));
37511diff -urNp linux-3.0.4/fs/btrfs/inode.c linux-3.0.4/fs/btrfs/inode.c
37512--- linux-3.0.4/fs/btrfs/inode.c 2011-07-21 22:17:23.000000000 -0400
37513+++ linux-3.0.4/fs/btrfs/inode.c 2011-08-23 21:48:14.000000000 -0400
37514@@ -6895,7 +6895,7 @@ fail:
37515 return -ENOMEM;
37516 }
37517
37518-static int btrfs_getattr(struct vfsmount *mnt,
37519+int btrfs_getattr(struct vfsmount *mnt,
37520 struct dentry *dentry, struct kstat *stat)
37521 {
37522 struct inode *inode = dentry->d_inode;
37523@@ -6907,6 +6907,14 @@ static int btrfs_getattr(struct vfsmount
37524 return 0;
37525 }
37526
37527+EXPORT_SYMBOL(btrfs_getattr);
37528+
37529+dev_t get_btrfs_dev_from_inode(struct inode *inode)
37530+{
37531+ return BTRFS_I(inode)->root->anon_super.s_dev;
37532+}
37533+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
37534+
37535 /*
37536 * If a file is moved, it will inherit the cow and compression flags of the new
37537 * directory.
37538diff -urNp linux-3.0.4/fs/btrfs/ioctl.c linux-3.0.4/fs/btrfs/ioctl.c
37539--- linux-3.0.4/fs/btrfs/ioctl.c 2011-07-21 22:17:23.000000000 -0400
37540+++ linux-3.0.4/fs/btrfs/ioctl.c 2011-08-23 21:48:14.000000000 -0400
37541@@ -2676,9 +2676,12 @@ long btrfs_ioctl_space_info(struct btrfs
37542 for (i = 0; i < num_types; i++) {
37543 struct btrfs_space_info *tmp;
37544
37545+ /* Don't copy in more than we allocated */
37546 if (!slot_count)
37547 break;
37548
37549+ slot_count--;
37550+
37551 info = NULL;
37552 rcu_read_lock();
37553 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
37554@@ -2700,10 +2703,7 @@ long btrfs_ioctl_space_info(struct btrfs
37555 memcpy(dest, &space, sizeof(space));
37556 dest++;
37557 space_args.total_spaces++;
37558- slot_count--;
37559 }
37560- if (!slot_count)
37561- break;
37562 }
37563 up_read(&info->groups_sem);
37564 }
37565diff -urNp linux-3.0.4/fs/btrfs/relocation.c linux-3.0.4/fs/btrfs/relocation.c
37566--- linux-3.0.4/fs/btrfs/relocation.c 2011-07-21 22:17:23.000000000 -0400
37567+++ linux-3.0.4/fs/btrfs/relocation.c 2011-08-23 21:47:56.000000000 -0400
37568@@ -1242,7 +1242,7 @@ static int __update_reloc_root(struct bt
37569 }
37570 spin_unlock(&rc->reloc_root_tree.lock);
37571
37572- BUG_ON((struct btrfs_root *)node->data != root);
37573+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
37574
37575 if (!del) {
37576 spin_lock(&rc->reloc_root_tree.lock);
37577diff -urNp linux-3.0.4/fs/cachefiles/bind.c linux-3.0.4/fs/cachefiles/bind.c
37578--- linux-3.0.4/fs/cachefiles/bind.c 2011-07-21 22:17:23.000000000 -0400
37579+++ linux-3.0.4/fs/cachefiles/bind.c 2011-08-23 21:47:56.000000000 -0400
37580@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
37581 args);
37582
37583 /* start by checking things over */
37584- ASSERT(cache->fstop_percent >= 0 &&
37585- cache->fstop_percent < cache->fcull_percent &&
37586+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
37587 cache->fcull_percent < cache->frun_percent &&
37588 cache->frun_percent < 100);
37589
37590- ASSERT(cache->bstop_percent >= 0 &&
37591- cache->bstop_percent < cache->bcull_percent &&
37592+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
37593 cache->bcull_percent < cache->brun_percent &&
37594 cache->brun_percent < 100);
37595
37596diff -urNp linux-3.0.4/fs/cachefiles/daemon.c linux-3.0.4/fs/cachefiles/daemon.c
37597--- linux-3.0.4/fs/cachefiles/daemon.c 2011-07-21 22:17:23.000000000 -0400
37598+++ linux-3.0.4/fs/cachefiles/daemon.c 2011-08-23 21:47:56.000000000 -0400
37599@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(st
37600 if (n > buflen)
37601 return -EMSGSIZE;
37602
37603- if (copy_to_user(_buffer, buffer, n) != 0)
37604+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
37605 return -EFAULT;
37606
37607 return n;
37608@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(s
37609 if (test_bit(CACHEFILES_DEAD, &cache->flags))
37610 return -EIO;
37611
37612- if (datalen < 0 || datalen > PAGE_SIZE - 1)
37613+ if (datalen > PAGE_SIZE - 1)
37614 return -EOPNOTSUPP;
37615
37616 /* drag the command string into the kernel so we can parse it */
37617@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struc
37618 if (args[0] != '%' || args[1] != '\0')
37619 return -EINVAL;
37620
37621- if (fstop < 0 || fstop >= cache->fcull_percent)
37622+ if (fstop >= cache->fcull_percent)
37623 return cachefiles_daemon_range_error(cache, args);
37624
37625 cache->fstop_percent = fstop;
37626@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struc
37627 if (args[0] != '%' || args[1] != '\0')
37628 return -EINVAL;
37629
37630- if (bstop < 0 || bstop >= cache->bcull_percent)
37631+ if (bstop >= cache->bcull_percent)
37632 return cachefiles_daemon_range_error(cache, args);
37633
37634 cache->bstop_percent = bstop;
37635diff -urNp linux-3.0.4/fs/cachefiles/internal.h linux-3.0.4/fs/cachefiles/internal.h
37636--- linux-3.0.4/fs/cachefiles/internal.h 2011-07-21 22:17:23.000000000 -0400
37637+++ linux-3.0.4/fs/cachefiles/internal.h 2011-08-23 21:47:56.000000000 -0400
37638@@ -57,7 +57,7 @@ struct cachefiles_cache {
37639 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
37640 struct rb_root active_nodes; /* active nodes (can't be culled) */
37641 rwlock_t active_lock; /* lock for active_nodes */
37642- atomic_t gravecounter; /* graveyard uniquifier */
37643+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
37644 unsigned frun_percent; /* when to stop culling (% files) */
37645 unsigned fcull_percent; /* when to start culling (% files) */
37646 unsigned fstop_percent; /* when to stop allocating (% files) */
37647@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struc
37648 * proc.c
37649 */
37650 #ifdef CONFIG_CACHEFILES_HISTOGRAM
37651-extern atomic_t cachefiles_lookup_histogram[HZ];
37652-extern atomic_t cachefiles_mkdir_histogram[HZ];
37653-extern atomic_t cachefiles_create_histogram[HZ];
37654+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
37655+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
37656+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
37657
37658 extern int __init cachefiles_proc_init(void);
37659 extern void cachefiles_proc_cleanup(void);
37660 static inline
37661-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
37662+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
37663 {
37664 unsigned long jif = jiffies - start_jif;
37665 if (jif >= HZ)
37666 jif = HZ - 1;
37667- atomic_inc(&histogram[jif]);
37668+ atomic_inc_unchecked(&histogram[jif]);
37669 }
37670
37671 #else
37672diff -urNp linux-3.0.4/fs/cachefiles/namei.c linux-3.0.4/fs/cachefiles/namei.c
37673--- linux-3.0.4/fs/cachefiles/namei.c 2011-07-21 22:17:23.000000000 -0400
37674+++ linux-3.0.4/fs/cachefiles/namei.c 2011-08-23 21:47:56.000000000 -0400
37675@@ -318,7 +318,7 @@ try_again:
37676 /* first step is to make up a grave dentry in the graveyard */
37677 sprintf(nbuffer, "%08x%08x",
37678 (uint32_t) get_seconds(),
37679- (uint32_t) atomic_inc_return(&cache->gravecounter));
37680+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
37681
37682 /* do the multiway lock magic */
37683 trap = lock_rename(cache->graveyard, dir);
37684diff -urNp linux-3.0.4/fs/cachefiles/proc.c linux-3.0.4/fs/cachefiles/proc.c
37685--- linux-3.0.4/fs/cachefiles/proc.c 2011-07-21 22:17:23.000000000 -0400
37686+++ linux-3.0.4/fs/cachefiles/proc.c 2011-08-23 21:47:56.000000000 -0400
37687@@ -14,9 +14,9 @@
37688 #include <linux/seq_file.h>
37689 #include "internal.h"
37690
37691-atomic_t cachefiles_lookup_histogram[HZ];
37692-atomic_t cachefiles_mkdir_histogram[HZ];
37693-atomic_t cachefiles_create_histogram[HZ];
37694+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
37695+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
37696+atomic_unchecked_t cachefiles_create_histogram[HZ];
37697
37698 /*
37699 * display the latency histogram
37700@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
37701 return 0;
37702 default:
37703 index = (unsigned long) v - 3;
37704- x = atomic_read(&cachefiles_lookup_histogram[index]);
37705- y = atomic_read(&cachefiles_mkdir_histogram[index]);
37706- z = atomic_read(&cachefiles_create_histogram[index]);
37707+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
37708+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
37709+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
37710 if (x == 0 && y == 0 && z == 0)
37711 return 0;
37712
37713diff -urNp linux-3.0.4/fs/cachefiles/rdwr.c linux-3.0.4/fs/cachefiles/rdwr.c
37714--- linux-3.0.4/fs/cachefiles/rdwr.c 2011-07-21 22:17:23.000000000 -0400
37715+++ linux-3.0.4/fs/cachefiles/rdwr.c 2011-08-23 21:47:56.000000000 -0400
37716@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
37717 old_fs = get_fs();
37718 set_fs(KERNEL_DS);
37719 ret = file->f_op->write(
37720- file, (const void __user *) data, len, &pos);
37721+ file, (__force const void __user *) data, len, &pos);
37722 set_fs(old_fs);
37723 kunmap(page);
37724 if (ret != len)
37725diff -urNp linux-3.0.4/fs/ceph/dir.c linux-3.0.4/fs/ceph/dir.c
37726--- linux-3.0.4/fs/ceph/dir.c 2011-07-21 22:17:23.000000000 -0400
37727+++ linux-3.0.4/fs/ceph/dir.c 2011-08-23 21:47:56.000000000 -0400
37728@@ -226,7 +226,7 @@ static int ceph_readdir(struct file *fil
37729 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
37730 struct ceph_mds_client *mdsc = fsc->mdsc;
37731 unsigned frag = fpos_frag(filp->f_pos);
37732- int off = fpos_off(filp->f_pos);
37733+ unsigned int off = fpos_off(filp->f_pos);
37734 int err;
37735 u32 ftype;
37736 struct ceph_mds_reply_info_parsed *rinfo;
37737diff -urNp linux-3.0.4/fs/cifs/cifs_debug.c linux-3.0.4/fs/cifs/cifs_debug.c
37738--- linux-3.0.4/fs/cifs/cifs_debug.c 2011-07-21 22:17:23.000000000 -0400
37739+++ linux-3.0.4/fs/cifs/cifs_debug.c 2011-08-25 17:18:05.000000000 -0400
37740@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(str
37741
37742 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
37743 #ifdef CONFIG_CIFS_STATS2
37744- atomic_set(&totBufAllocCount, 0);
37745- atomic_set(&totSmBufAllocCount, 0);
37746+ atomic_set_unchecked(&totBufAllocCount, 0);
37747+ atomic_set_unchecked(&totSmBufAllocCount, 0);
37748 #endif /* CONFIG_CIFS_STATS2 */
37749 spin_lock(&cifs_tcp_ses_lock);
37750 list_for_each(tmp1, &cifs_tcp_ses_list) {
37751@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(str
37752 tcon = list_entry(tmp3,
37753 struct cifs_tcon,
37754 tcon_list);
37755- atomic_set(&tcon->num_smbs_sent, 0);
37756- atomic_set(&tcon->num_writes, 0);
37757- atomic_set(&tcon->num_reads, 0);
37758- atomic_set(&tcon->num_oplock_brks, 0);
37759- atomic_set(&tcon->num_opens, 0);
37760- atomic_set(&tcon->num_posixopens, 0);
37761- atomic_set(&tcon->num_posixmkdirs, 0);
37762- atomic_set(&tcon->num_closes, 0);
37763- atomic_set(&tcon->num_deletes, 0);
37764- atomic_set(&tcon->num_mkdirs, 0);
37765- atomic_set(&tcon->num_rmdirs, 0);
37766- atomic_set(&tcon->num_renames, 0);
37767- atomic_set(&tcon->num_t2renames, 0);
37768- atomic_set(&tcon->num_ffirst, 0);
37769- atomic_set(&tcon->num_fnext, 0);
37770- atomic_set(&tcon->num_fclose, 0);
37771- atomic_set(&tcon->num_hardlinks, 0);
37772- atomic_set(&tcon->num_symlinks, 0);
37773- atomic_set(&tcon->num_locks, 0);
37774+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
37775+ atomic_set_unchecked(&tcon->num_writes, 0);
37776+ atomic_set_unchecked(&tcon->num_reads, 0);
37777+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
37778+ atomic_set_unchecked(&tcon->num_opens, 0);
37779+ atomic_set_unchecked(&tcon->num_posixopens, 0);
37780+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
37781+ atomic_set_unchecked(&tcon->num_closes, 0);
37782+ atomic_set_unchecked(&tcon->num_deletes, 0);
37783+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
37784+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
37785+ atomic_set_unchecked(&tcon->num_renames, 0);
37786+ atomic_set_unchecked(&tcon->num_t2renames, 0);
37787+ atomic_set_unchecked(&tcon->num_ffirst, 0);
37788+ atomic_set_unchecked(&tcon->num_fnext, 0);
37789+ atomic_set_unchecked(&tcon->num_fclose, 0);
37790+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
37791+ atomic_set_unchecked(&tcon->num_symlinks, 0);
37792+ atomic_set_unchecked(&tcon->num_locks, 0);
37793 }
37794 }
37795 }
37796@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct s
37797 smBufAllocCount.counter, cifs_min_small);
37798 #ifdef CONFIG_CIFS_STATS2
37799 seq_printf(m, "Total Large %d Small %d Allocations\n",
37800- atomic_read(&totBufAllocCount),
37801- atomic_read(&totSmBufAllocCount));
37802+ atomic_read_unchecked(&totBufAllocCount),
37803+ atomic_read_unchecked(&totSmBufAllocCount));
37804 #endif /* CONFIG_CIFS_STATS2 */
37805
37806 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
37807@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct s
37808 if (tcon->need_reconnect)
37809 seq_puts(m, "\tDISCONNECTED ");
37810 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
37811- atomic_read(&tcon->num_smbs_sent),
37812- atomic_read(&tcon->num_oplock_brks));
37813+ atomic_read_unchecked(&tcon->num_smbs_sent),
37814+ atomic_read_unchecked(&tcon->num_oplock_brks));
37815 seq_printf(m, "\nReads: %d Bytes: %lld",
37816- atomic_read(&tcon->num_reads),
37817+ atomic_read_unchecked(&tcon->num_reads),
37818 (long long)(tcon->bytes_read));
37819 seq_printf(m, "\nWrites: %d Bytes: %lld",
37820- atomic_read(&tcon->num_writes),
37821+ atomic_read_unchecked(&tcon->num_writes),
37822 (long long)(tcon->bytes_written));
37823 seq_printf(m, "\nFlushes: %d",
37824- atomic_read(&tcon->num_flushes));
37825+ atomic_read_unchecked(&tcon->num_flushes));
37826 seq_printf(m, "\nLocks: %d HardLinks: %d "
37827 "Symlinks: %d",
37828- atomic_read(&tcon->num_locks),
37829- atomic_read(&tcon->num_hardlinks),
37830- atomic_read(&tcon->num_symlinks));
37831+ atomic_read_unchecked(&tcon->num_locks),
37832+ atomic_read_unchecked(&tcon->num_hardlinks),
37833+ atomic_read_unchecked(&tcon->num_symlinks));
37834 seq_printf(m, "\nOpens: %d Closes: %d "
37835 "Deletes: %d",
37836- atomic_read(&tcon->num_opens),
37837- atomic_read(&tcon->num_closes),
37838- atomic_read(&tcon->num_deletes));
37839+ atomic_read_unchecked(&tcon->num_opens),
37840+ atomic_read_unchecked(&tcon->num_closes),
37841+ atomic_read_unchecked(&tcon->num_deletes));
37842 seq_printf(m, "\nPosix Opens: %d "
37843 "Posix Mkdirs: %d",
37844- atomic_read(&tcon->num_posixopens),
37845- atomic_read(&tcon->num_posixmkdirs));
37846+ atomic_read_unchecked(&tcon->num_posixopens),
37847+ atomic_read_unchecked(&tcon->num_posixmkdirs));
37848 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
37849- atomic_read(&tcon->num_mkdirs),
37850- atomic_read(&tcon->num_rmdirs));
37851+ atomic_read_unchecked(&tcon->num_mkdirs),
37852+ atomic_read_unchecked(&tcon->num_rmdirs));
37853 seq_printf(m, "\nRenames: %d T2 Renames %d",
37854- atomic_read(&tcon->num_renames),
37855- atomic_read(&tcon->num_t2renames));
37856+ atomic_read_unchecked(&tcon->num_renames),
37857+ atomic_read_unchecked(&tcon->num_t2renames));
37858 seq_printf(m, "\nFindFirst: %d FNext %d "
37859 "FClose %d",
37860- atomic_read(&tcon->num_ffirst),
37861- atomic_read(&tcon->num_fnext),
37862- atomic_read(&tcon->num_fclose));
37863+ atomic_read_unchecked(&tcon->num_ffirst),
37864+ atomic_read_unchecked(&tcon->num_fnext),
37865+ atomic_read_unchecked(&tcon->num_fclose));
37866 }
37867 }
37868 }
37869diff -urNp linux-3.0.4/fs/cifs/cifsfs.c linux-3.0.4/fs/cifs/cifsfs.c
37870--- linux-3.0.4/fs/cifs/cifsfs.c 2011-08-23 21:44:40.000000000 -0400
37871+++ linux-3.0.4/fs/cifs/cifsfs.c 2011-08-25 17:18:05.000000000 -0400
37872@@ -994,7 +994,7 @@ cifs_init_request_bufs(void)
37873 cifs_req_cachep = kmem_cache_create("cifs_request",
37874 CIFSMaxBufSize +
37875 MAX_CIFS_HDR_SIZE, 0,
37876- SLAB_HWCACHE_ALIGN, NULL);
37877+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
37878 if (cifs_req_cachep == NULL)
37879 return -ENOMEM;
37880
37881@@ -1021,7 +1021,7 @@ cifs_init_request_bufs(void)
37882 efficient to alloc 1 per page off the slab compared to 17K (5page)
37883 alloc of large cifs buffers even when page debugging is on */
37884 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
37885- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
37886+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
37887 NULL);
37888 if (cifs_sm_req_cachep == NULL) {
37889 mempool_destroy(cifs_req_poolp);
37890@@ -1106,8 +1106,8 @@ init_cifs(void)
37891 atomic_set(&bufAllocCount, 0);
37892 atomic_set(&smBufAllocCount, 0);
37893 #ifdef CONFIG_CIFS_STATS2
37894- atomic_set(&totBufAllocCount, 0);
37895- atomic_set(&totSmBufAllocCount, 0);
37896+ atomic_set_unchecked(&totBufAllocCount, 0);
37897+ atomic_set_unchecked(&totSmBufAllocCount, 0);
37898 #endif /* CONFIG_CIFS_STATS2 */
37899
37900 atomic_set(&midCount, 0);
37901diff -urNp linux-3.0.4/fs/cifs/cifsglob.h linux-3.0.4/fs/cifs/cifsglob.h
37902--- linux-3.0.4/fs/cifs/cifsglob.h 2011-07-21 22:17:23.000000000 -0400
37903+++ linux-3.0.4/fs/cifs/cifsglob.h 2011-08-25 17:18:05.000000000 -0400
37904@@ -381,28 +381,28 @@ struct cifs_tcon {
37905 __u16 Flags; /* optional support bits */
37906 enum statusEnum tidStatus;
37907 #ifdef CONFIG_CIFS_STATS
37908- atomic_t num_smbs_sent;
37909- atomic_t num_writes;
37910- atomic_t num_reads;
37911- atomic_t num_flushes;
37912- atomic_t num_oplock_brks;
37913- atomic_t num_opens;
37914- atomic_t num_closes;
37915- atomic_t num_deletes;
37916- atomic_t num_mkdirs;
37917- atomic_t num_posixopens;
37918- atomic_t num_posixmkdirs;
37919- atomic_t num_rmdirs;
37920- atomic_t num_renames;
37921- atomic_t num_t2renames;
37922- atomic_t num_ffirst;
37923- atomic_t num_fnext;
37924- atomic_t num_fclose;
37925- atomic_t num_hardlinks;
37926- atomic_t num_symlinks;
37927- atomic_t num_locks;
37928- atomic_t num_acl_get;
37929- atomic_t num_acl_set;
37930+ atomic_unchecked_t num_smbs_sent;
37931+ atomic_unchecked_t num_writes;
37932+ atomic_unchecked_t num_reads;
37933+ atomic_unchecked_t num_flushes;
37934+ atomic_unchecked_t num_oplock_brks;
37935+ atomic_unchecked_t num_opens;
37936+ atomic_unchecked_t num_closes;
37937+ atomic_unchecked_t num_deletes;
37938+ atomic_unchecked_t num_mkdirs;
37939+ atomic_unchecked_t num_posixopens;
37940+ atomic_unchecked_t num_posixmkdirs;
37941+ atomic_unchecked_t num_rmdirs;
37942+ atomic_unchecked_t num_renames;
37943+ atomic_unchecked_t num_t2renames;
37944+ atomic_unchecked_t num_ffirst;
37945+ atomic_unchecked_t num_fnext;
37946+ atomic_unchecked_t num_fclose;
37947+ atomic_unchecked_t num_hardlinks;
37948+ atomic_unchecked_t num_symlinks;
37949+ atomic_unchecked_t num_locks;
37950+ atomic_unchecked_t num_acl_get;
37951+ atomic_unchecked_t num_acl_set;
37952 #ifdef CONFIG_CIFS_STATS2
37953 unsigned long long time_writes;
37954 unsigned long long time_reads;
37955@@ -613,7 +613,7 @@ convert_delimiter(char *path, char delim
37956 }
37957
37958 #ifdef CONFIG_CIFS_STATS
37959-#define cifs_stats_inc atomic_inc
37960+#define cifs_stats_inc atomic_inc_unchecked
37961
37962 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
37963 unsigned int bytes)
37964@@ -911,8 +911,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnect
37965 /* Various Debug counters */
37966 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
37967 #ifdef CONFIG_CIFS_STATS2
37968-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
37969-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
37970+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
37971+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
37972 #endif
37973 GLOBAL_EXTERN atomic_t smBufAllocCount;
37974 GLOBAL_EXTERN atomic_t midCount;
37975diff -urNp linux-3.0.4/fs/cifs/link.c linux-3.0.4/fs/cifs/link.c
37976--- linux-3.0.4/fs/cifs/link.c 2011-07-21 22:17:23.000000000 -0400
37977+++ linux-3.0.4/fs/cifs/link.c 2011-08-23 21:47:56.000000000 -0400
37978@@ -587,7 +587,7 @@ symlink_exit:
37979
37980 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
37981 {
37982- char *p = nd_get_link(nd);
37983+ const char *p = nd_get_link(nd);
37984 if (!IS_ERR(p))
37985 kfree(p);
37986 }
37987diff -urNp linux-3.0.4/fs/cifs/misc.c linux-3.0.4/fs/cifs/misc.c
37988--- linux-3.0.4/fs/cifs/misc.c 2011-07-21 22:17:23.000000000 -0400
37989+++ linux-3.0.4/fs/cifs/misc.c 2011-08-25 17:18:05.000000000 -0400
37990@@ -156,7 +156,7 @@ cifs_buf_get(void)
37991 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
37992 atomic_inc(&bufAllocCount);
37993 #ifdef CONFIG_CIFS_STATS2
37994- atomic_inc(&totBufAllocCount);
37995+ atomic_inc_unchecked(&totBufAllocCount);
37996 #endif /* CONFIG_CIFS_STATS2 */
37997 }
37998
37999@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
38000 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
38001 atomic_inc(&smBufAllocCount);
38002 #ifdef CONFIG_CIFS_STATS2
38003- atomic_inc(&totSmBufAllocCount);
38004+ atomic_inc_unchecked(&totSmBufAllocCount);
38005 #endif /* CONFIG_CIFS_STATS2 */
38006
38007 }
38008diff -urNp linux-3.0.4/fs/coda/cache.c linux-3.0.4/fs/coda/cache.c
38009--- linux-3.0.4/fs/coda/cache.c 2011-07-21 22:17:23.000000000 -0400
38010+++ linux-3.0.4/fs/coda/cache.c 2011-08-23 21:47:56.000000000 -0400
38011@@ -24,7 +24,7 @@
38012 #include "coda_linux.h"
38013 #include "coda_cache.h"
38014
38015-static atomic_t permission_epoch = ATOMIC_INIT(0);
38016+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
38017
38018 /* replace or extend an acl cache hit */
38019 void coda_cache_enter(struct inode *inode, int mask)
38020@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inod
38021 struct coda_inode_info *cii = ITOC(inode);
38022
38023 spin_lock(&cii->c_lock);
38024- cii->c_cached_epoch = atomic_read(&permission_epoch);
38025+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
38026 if (cii->c_uid != current_fsuid()) {
38027 cii->c_uid = current_fsuid();
38028 cii->c_cached_perm = mask;
38029@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode
38030 {
38031 struct coda_inode_info *cii = ITOC(inode);
38032 spin_lock(&cii->c_lock);
38033- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
38034+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
38035 spin_unlock(&cii->c_lock);
38036 }
38037
38038 /* remove all acl caches */
38039 void coda_cache_clear_all(struct super_block *sb)
38040 {
38041- atomic_inc(&permission_epoch);
38042+ atomic_inc_unchecked(&permission_epoch);
38043 }
38044
38045
38046@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode
38047 spin_lock(&cii->c_lock);
38048 hit = (mask & cii->c_cached_perm) == mask &&
38049 cii->c_uid == current_fsuid() &&
38050- cii->c_cached_epoch == atomic_read(&permission_epoch);
38051+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
38052 spin_unlock(&cii->c_lock);
38053
38054 return hit;
38055diff -urNp linux-3.0.4/fs/compat_binfmt_elf.c linux-3.0.4/fs/compat_binfmt_elf.c
38056--- linux-3.0.4/fs/compat_binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
38057+++ linux-3.0.4/fs/compat_binfmt_elf.c 2011-08-23 21:47:56.000000000 -0400
38058@@ -30,11 +30,13 @@
38059 #undef elf_phdr
38060 #undef elf_shdr
38061 #undef elf_note
38062+#undef elf_dyn
38063 #undef elf_addr_t
38064 #define elfhdr elf32_hdr
38065 #define elf_phdr elf32_phdr
38066 #define elf_shdr elf32_shdr
38067 #define elf_note elf32_note
38068+#define elf_dyn Elf32_Dyn
38069 #define elf_addr_t Elf32_Addr
38070
38071 /*
38072diff -urNp linux-3.0.4/fs/compat.c linux-3.0.4/fs/compat.c
38073--- linux-3.0.4/fs/compat.c 2011-07-21 22:17:23.000000000 -0400
38074+++ linux-3.0.4/fs/compat.c 2011-08-23 22:49:33.000000000 -0400
38075@@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int
38076 goto out;
38077
38078 ret = -EINVAL;
38079- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
38080+ if (nr_segs > UIO_MAXIOV)
38081 goto out;
38082 if (nr_segs > fast_segs) {
38083 ret = -ENOMEM;
38084@@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
38085
38086 struct compat_readdir_callback {
38087 struct compat_old_linux_dirent __user *dirent;
38088+ struct file * file;
38089 int result;
38090 };
38091
38092@@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf
38093 buf->result = -EOVERFLOW;
38094 return -EOVERFLOW;
38095 }
38096+
38097+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
38098+ return 0;
38099+
38100 buf->result++;
38101 dirent = buf->dirent;
38102 if (!access_ok(VERIFY_WRITE, dirent,
38103@@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(u
38104
38105 buf.result = 0;
38106 buf.dirent = dirent;
38107+ buf.file = file;
38108
38109 error = vfs_readdir(file, compat_fillonedir, &buf);
38110 if (buf.result)
38111@@ -917,6 +923,7 @@ struct compat_linux_dirent {
38112 struct compat_getdents_callback {
38113 struct compat_linux_dirent __user *current_dir;
38114 struct compat_linux_dirent __user *previous;
38115+ struct file * file;
38116 int count;
38117 int error;
38118 };
38119@@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, c
38120 buf->error = -EOVERFLOW;
38121 return -EOVERFLOW;
38122 }
38123+
38124+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
38125+ return 0;
38126+
38127 dirent = buf->previous;
38128 if (dirent) {
38129 if (__put_user(offset, &dirent->d_off))
38130@@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsi
38131 buf.previous = NULL;
38132 buf.count = count;
38133 buf.error = 0;
38134+ buf.file = file;
38135
38136 error = vfs_readdir(file, compat_filldir, &buf);
38137 if (error >= 0)
38138@@ -1006,6 +1018,7 @@ out:
38139 struct compat_getdents_callback64 {
38140 struct linux_dirent64 __user *current_dir;
38141 struct linux_dirent64 __user *previous;
38142+ struct file * file;
38143 int count;
38144 int error;
38145 };
38146@@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf
38147 buf->error = -EINVAL; /* only used if we fail.. */
38148 if (reclen > buf->count)
38149 return -EINVAL;
38150+
38151+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
38152+ return 0;
38153+
38154 dirent = buf->previous;
38155
38156 if (dirent) {
38157@@ -1073,6 +1090,7 @@ asmlinkage long compat_sys_getdents64(un
38158 buf.previous = NULL;
38159 buf.count = count;
38160 buf.error = 0;
38161+ buf.file = file;
38162
38163 error = vfs_readdir(file, compat_filldir64, &buf);
38164 if (error >= 0)
38165@@ -1446,6 +1464,8 @@ int compat_core_sys_select(int n, compat
38166 struct fdtable *fdt;
38167 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
38168
38169+ pax_track_stack();
38170+
38171 if (n < 0)
38172 goto out_nofds;
38173
38174diff -urNp linux-3.0.4/fs/compat_ioctl.c linux-3.0.4/fs/compat_ioctl.c
38175--- linux-3.0.4/fs/compat_ioctl.c 2011-07-21 22:17:23.000000000 -0400
38176+++ linux-3.0.4/fs/compat_ioctl.c 2011-08-23 21:47:56.000000000 -0400
38177@@ -208,6 +208,8 @@ static int do_video_set_spu_palette(unsi
38178
38179 err = get_user(palp, &up->palette);
38180 err |= get_user(length, &up->length);
38181+ if (err)
38182+ return -EFAULT;
38183
38184 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
38185 err = put_user(compat_ptr(palp), &up_native->palette);
38186@@ -1638,8 +1640,8 @@ asmlinkage long compat_sys_ioctl(unsigne
38187 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
38188 {
38189 unsigned int a, b;
38190- a = *(unsigned int *)p;
38191- b = *(unsigned int *)q;
38192+ a = *(const unsigned int *)p;
38193+ b = *(const unsigned int *)q;
38194 if (a > b)
38195 return 1;
38196 if (a < b)
38197diff -urNp linux-3.0.4/fs/configfs/dir.c linux-3.0.4/fs/configfs/dir.c
38198--- linux-3.0.4/fs/configfs/dir.c 2011-07-21 22:17:23.000000000 -0400
38199+++ linux-3.0.4/fs/configfs/dir.c 2011-08-23 21:47:56.000000000 -0400
38200@@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file
38201 }
38202 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
38203 struct configfs_dirent *next;
38204- const char * name;
38205+ const unsigned char * name;
38206+ char d_name[sizeof(next->s_dentry->d_iname)];
38207 int len;
38208 struct inode *inode = NULL;
38209
38210@@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file
38211 continue;
38212
38213 name = configfs_get_name(next);
38214- len = strlen(name);
38215+ if (next->s_dentry && name == next->s_dentry->d_iname) {
38216+ len = next->s_dentry->d_name.len;
38217+ memcpy(d_name, name, len);
38218+ name = d_name;
38219+ } else
38220+ len = strlen(name);
38221
38222 /*
38223 * We'll have a dentry and an inode for
38224diff -urNp linux-3.0.4/fs/dcache.c linux-3.0.4/fs/dcache.c
38225--- linux-3.0.4/fs/dcache.c 2011-07-21 22:17:23.000000000 -0400
38226+++ linux-3.0.4/fs/dcache.c 2011-08-23 21:47:56.000000000 -0400
38227@@ -3089,7 +3089,7 @@ void __init vfs_caches_init(unsigned lon
38228 mempages -= reserve;
38229
38230 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
38231- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
38232+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
38233
38234 dcache_init();
38235 inode_init();
38236diff -urNp linux-3.0.4/fs/ecryptfs/inode.c linux-3.0.4/fs/ecryptfs/inode.c
38237--- linux-3.0.4/fs/ecryptfs/inode.c 2011-08-23 21:44:40.000000000 -0400
38238+++ linux-3.0.4/fs/ecryptfs/inode.c 2011-08-23 21:47:56.000000000 -0400
38239@@ -704,7 +704,7 @@ static int ecryptfs_readlink_lower(struc
38240 old_fs = get_fs();
38241 set_fs(get_ds());
38242 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
38243- (char __user *)lower_buf,
38244+ (__force char __user *)lower_buf,
38245 lower_bufsiz);
38246 set_fs(old_fs);
38247 if (rc < 0)
38248@@ -750,7 +750,7 @@ static void *ecryptfs_follow_link(struct
38249 }
38250 old_fs = get_fs();
38251 set_fs(get_ds());
38252- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
38253+ rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
38254 set_fs(old_fs);
38255 if (rc < 0) {
38256 kfree(buf);
38257@@ -765,7 +765,7 @@ out:
38258 static void
38259 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
38260 {
38261- char *buf = nd_get_link(nd);
38262+ const char *buf = nd_get_link(nd);
38263 if (!IS_ERR(buf)) {
38264 /* Free the char* */
38265 kfree(buf);
38266diff -urNp linux-3.0.4/fs/ecryptfs/miscdev.c linux-3.0.4/fs/ecryptfs/miscdev.c
38267--- linux-3.0.4/fs/ecryptfs/miscdev.c 2011-07-21 22:17:23.000000000 -0400
38268+++ linux-3.0.4/fs/ecryptfs/miscdev.c 2011-08-23 21:47:56.000000000 -0400
38269@@ -328,7 +328,7 @@ check_list:
38270 goto out_unlock_msg_ctx;
38271 i = 5;
38272 if (msg_ctx->msg) {
38273- if (copy_to_user(&buf[i], packet_length, packet_length_size))
38274+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
38275 goto out_unlock_msg_ctx;
38276 i += packet_length_size;
38277 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
38278diff -urNp linux-3.0.4/fs/exec.c linux-3.0.4/fs/exec.c
38279--- linux-3.0.4/fs/exec.c 2011-07-21 22:17:23.000000000 -0400
38280+++ linux-3.0.4/fs/exec.c 2011-08-25 17:26:58.000000000 -0400
38281@@ -55,12 +55,24 @@
38282 #include <linux/pipe_fs_i.h>
38283 #include <linux/oom.h>
38284 #include <linux/compat.h>
38285+#include <linux/random.h>
38286+#include <linux/seq_file.h>
38287+
38288+#ifdef CONFIG_PAX_REFCOUNT
38289+#include <linux/kallsyms.h>
38290+#include <linux/kdebug.h>
38291+#endif
38292
38293 #include <asm/uaccess.h>
38294 #include <asm/mmu_context.h>
38295 #include <asm/tlb.h>
38296 #include "internal.h"
38297
38298+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
38299+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
38300+EXPORT_SYMBOL(pax_set_initial_flags_func);
38301+#endif
38302+
38303 int core_uses_pid;
38304 char core_pattern[CORENAME_MAX_SIZE] = "core";
38305 unsigned int core_pipe_limit;
38306@@ -70,7 +82,7 @@ struct core_name {
38307 char *corename;
38308 int used, size;
38309 };
38310-static atomic_t call_count = ATOMIC_INIT(1);
38311+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
38312
38313 /* The maximal length of core_pattern is also specified in sysctl.c */
38314
38315@@ -116,7 +128,7 @@ SYSCALL_DEFINE1(uselib, const char __use
38316 char *tmp = getname(library);
38317 int error = PTR_ERR(tmp);
38318 static const struct open_flags uselib_flags = {
38319- .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
38320+ .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
38321 .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
38322 .intent = LOOKUP_OPEN
38323 };
38324@@ -195,18 +207,10 @@ static struct page *get_arg_page(struct
38325 int write)
38326 {
38327 struct page *page;
38328- int ret;
38329
38330-#ifdef CONFIG_STACK_GROWSUP
38331- if (write) {
38332- ret = expand_downwards(bprm->vma, pos);
38333- if (ret < 0)
38334- return NULL;
38335- }
38336-#endif
38337- ret = get_user_pages(current, bprm->mm, pos,
38338- 1, write, 1, &page, NULL);
38339- if (ret <= 0)
38340+ if (0 > expand_downwards(bprm->vma, pos))
38341+ return NULL;
38342+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
38343 return NULL;
38344
38345 if (write) {
38346@@ -281,6 +285,11 @@ static int __bprm_mm_init(struct linux_b
38347 vma->vm_end = STACK_TOP_MAX;
38348 vma->vm_start = vma->vm_end - PAGE_SIZE;
38349 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
38350+
38351+#ifdef CONFIG_PAX_SEGMEXEC
38352+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
38353+#endif
38354+
38355 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
38356 INIT_LIST_HEAD(&vma->anon_vma_chain);
38357
38358@@ -295,6 +304,12 @@ static int __bprm_mm_init(struct linux_b
38359 mm->stack_vm = mm->total_vm = 1;
38360 up_write(&mm->mmap_sem);
38361 bprm->p = vma->vm_end - sizeof(void *);
38362+
38363+#ifdef CONFIG_PAX_RANDUSTACK
38364+ if (randomize_va_space)
38365+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
38366+#endif
38367+
38368 return 0;
38369 err:
38370 up_write(&mm->mmap_sem);
38371@@ -403,19 +418,7 @@ err:
38372 return err;
38373 }
38374
38375-struct user_arg_ptr {
38376-#ifdef CONFIG_COMPAT
38377- bool is_compat;
38378-#endif
38379- union {
38380- const char __user *const __user *native;
38381-#ifdef CONFIG_COMPAT
38382- compat_uptr_t __user *compat;
38383-#endif
38384- } ptr;
38385-};
38386-
38387-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
38388+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
38389 {
38390 const char __user *native;
38391
38392@@ -566,7 +569,7 @@ int copy_strings_kernel(int argc, const
38393 int r;
38394 mm_segment_t oldfs = get_fs();
38395 struct user_arg_ptr argv = {
38396- .ptr.native = (const char __user *const __user *)__argv,
38397+ .ptr.native = (__force const char __user *const __user *)__argv,
38398 };
38399
38400 set_fs(KERNEL_DS);
38401@@ -601,7 +604,8 @@ static int shift_arg_pages(struct vm_are
38402 unsigned long new_end = old_end - shift;
38403 struct mmu_gather tlb;
38404
38405- BUG_ON(new_start > new_end);
38406+ if (new_start >= new_end || new_start < mmap_min_addr)
38407+ return -ENOMEM;
38408
38409 /*
38410 * ensure there are no vmas between where we want to go
38411@@ -610,6 +614,10 @@ static int shift_arg_pages(struct vm_are
38412 if (vma != find_vma(mm, new_start))
38413 return -EFAULT;
38414
38415+#ifdef CONFIG_PAX_SEGMEXEC
38416+ BUG_ON(pax_find_mirror_vma(vma));
38417+#endif
38418+
38419 /*
38420 * cover the whole range: [new_start, old_end)
38421 */
38422@@ -690,10 +698,6 @@ int setup_arg_pages(struct linux_binprm
38423 stack_top = arch_align_stack(stack_top);
38424 stack_top = PAGE_ALIGN(stack_top);
38425
38426- if (unlikely(stack_top < mmap_min_addr) ||
38427- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
38428- return -ENOMEM;
38429-
38430 stack_shift = vma->vm_end - stack_top;
38431
38432 bprm->p -= stack_shift;
38433@@ -705,8 +709,28 @@ int setup_arg_pages(struct linux_binprm
38434 bprm->exec -= stack_shift;
38435
38436 down_write(&mm->mmap_sem);
38437+
38438+ /* Move stack pages down in memory. */
38439+ if (stack_shift) {
38440+ ret = shift_arg_pages(vma, stack_shift);
38441+ if (ret)
38442+ goto out_unlock;
38443+ }
38444+
38445 vm_flags = VM_STACK_FLAGS;
38446
38447+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
38448+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
38449+ vm_flags &= ~VM_EXEC;
38450+
38451+#ifdef CONFIG_PAX_MPROTECT
38452+ if (mm->pax_flags & MF_PAX_MPROTECT)
38453+ vm_flags &= ~VM_MAYEXEC;
38454+#endif
38455+
38456+ }
38457+#endif
38458+
38459 /*
38460 * Adjust stack execute permissions; explicitly enable for
38461 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
38462@@ -725,13 +749,6 @@ int setup_arg_pages(struct linux_binprm
38463 goto out_unlock;
38464 BUG_ON(prev != vma);
38465
38466- /* Move stack pages down in memory. */
38467- if (stack_shift) {
38468- ret = shift_arg_pages(vma, stack_shift);
38469- if (ret)
38470- goto out_unlock;
38471- }
38472-
38473 /* mprotect_fixup is overkill to remove the temporary stack flags */
38474 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
38475
38476@@ -771,7 +788,7 @@ struct file *open_exec(const char *name)
38477 struct file *file;
38478 int err;
38479 static const struct open_flags open_exec_flags = {
38480- .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
38481+ .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
38482 .acc_mode = MAY_EXEC | MAY_OPEN,
38483 .intent = LOOKUP_OPEN
38484 };
38485@@ -812,7 +829,7 @@ int kernel_read(struct file *file, loff_
38486 old_fs = get_fs();
38487 set_fs(get_ds());
38488 /* The cast to a user pointer is valid due to the set_fs() */
38489- result = vfs_read(file, (void __user *)addr, count, &pos);
38490+ result = vfs_read(file, (__force void __user *)addr, count, &pos);
38491 set_fs(old_fs);
38492 return result;
38493 }
38494@@ -1236,7 +1253,7 @@ int check_unsafe_exec(struct linux_binpr
38495 }
38496 rcu_read_unlock();
38497
38498- if (p->fs->users > n_fs) {
38499+ if (atomic_read(&p->fs->users) > n_fs) {
38500 bprm->unsafe |= LSM_UNSAFE_SHARE;
38501 } else {
38502 res = -EAGAIN;
38503@@ -1428,11 +1445,35 @@ static int do_execve_common(const char *
38504 struct user_arg_ptr envp,
38505 struct pt_regs *regs)
38506 {
38507+#ifdef CONFIG_GRKERNSEC
38508+ struct file *old_exec_file;
38509+ struct acl_subject_label *old_acl;
38510+ struct rlimit old_rlim[RLIM_NLIMITS];
38511+#endif
38512 struct linux_binprm *bprm;
38513 struct file *file;
38514 struct files_struct *displaced;
38515 bool clear_in_exec;
38516 int retval;
38517+ const struct cred *cred = current_cred();
38518+
38519+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
38520+
38521+ /*
38522+ * We move the actual failure in case of RLIMIT_NPROC excess from
38523+ * set*uid() to execve() because too many poorly written programs
38524+ * don't check setuid() return code. Here we additionally recheck
38525+ * whether NPROC limit is still exceeded.
38526+ */
38527+ if ((current->flags & PF_NPROC_EXCEEDED) &&
38528+ atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) {
38529+ retval = -EAGAIN;
38530+ goto out_ret;
38531+ }
38532+
38533+ /* We're below the limit (still or again), so we don't want to make
38534+ * further execve() calls fail. */
38535+ current->flags &= ~PF_NPROC_EXCEEDED;
38536
38537 retval = unshare_files(&displaced);
38538 if (retval)
38539@@ -1464,6 +1505,16 @@ static int do_execve_common(const char *
38540 bprm->filename = filename;
38541 bprm->interp = filename;
38542
38543+ if (gr_process_user_ban()) {
38544+ retval = -EPERM;
38545+ goto out_file;
38546+ }
38547+
38548+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
38549+ retval = -EACCES;
38550+ goto out_file;
38551+ }
38552+
38553 retval = bprm_mm_init(bprm);
38554 if (retval)
38555 goto out_file;
38556@@ -1493,9 +1544,40 @@ static int do_execve_common(const char *
38557 if (retval < 0)
38558 goto out;
38559
38560+ if (!gr_tpe_allow(file)) {
38561+ retval = -EACCES;
38562+ goto out;
38563+ }
38564+
38565+ if (gr_check_crash_exec(file)) {
38566+ retval = -EACCES;
38567+ goto out;
38568+ }
38569+
38570+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
38571+
38572+ gr_handle_exec_args(bprm, argv);
38573+
38574+#ifdef CONFIG_GRKERNSEC
38575+ old_acl = current->acl;
38576+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
38577+ old_exec_file = current->exec_file;
38578+ get_file(file);
38579+ current->exec_file = file;
38580+#endif
38581+
38582+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
38583+ bprm->unsafe & LSM_UNSAFE_SHARE);
38584+ if (retval < 0)
38585+ goto out_fail;
38586+
38587 retval = search_binary_handler(bprm,regs);
38588 if (retval < 0)
38589- goto out;
38590+ goto out_fail;
38591+#ifdef CONFIG_GRKERNSEC
38592+ if (old_exec_file)
38593+ fput(old_exec_file);
38594+#endif
38595
38596 /* execve succeeded */
38597 current->fs->in_exec = 0;
38598@@ -1506,6 +1588,14 @@ static int do_execve_common(const char *
38599 put_files_struct(displaced);
38600 return retval;
38601
38602+out_fail:
38603+#ifdef CONFIG_GRKERNSEC
38604+ current->acl = old_acl;
38605+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
38606+ fput(current->exec_file);
38607+ current->exec_file = old_exec_file;
38608+#endif
38609+
38610 out:
38611 if (bprm->mm) {
38612 acct_arg_size(bprm, 0);
38613@@ -1579,7 +1669,7 @@ static int expand_corename(struct core_n
38614 {
38615 char *old_corename = cn->corename;
38616
38617- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
38618+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
38619 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
38620
38621 if (!cn->corename) {
38622@@ -1667,7 +1757,7 @@ static int format_corename(struct core_n
38623 int pid_in_pattern = 0;
38624 int err = 0;
38625
38626- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
38627+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
38628 cn->corename = kmalloc(cn->size, GFP_KERNEL);
38629 cn->used = 0;
38630
38631@@ -1758,6 +1848,219 @@ out:
38632 return ispipe;
38633 }
38634
38635+int pax_check_flags(unsigned long *flags)
38636+{
38637+ int retval = 0;
38638+
38639+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
38640+ if (*flags & MF_PAX_SEGMEXEC)
38641+ {
38642+ *flags &= ~MF_PAX_SEGMEXEC;
38643+ retval = -EINVAL;
38644+ }
38645+#endif
38646+
38647+ if ((*flags & MF_PAX_PAGEEXEC)
38648+
38649+#ifdef CONFIG_PAX_PAGEEXEC
38650+ && (*flags & MF_PAX_SEGMEXEC)
38651+#endif
38652+
38653+ )
38654+ {
38655+ *flags &= ~MF_PAX_PAGEEXEC;
38656+ retval = -EINVAL;
38657+ }
38658+
38659+ if ((*flags & MF_PAX_MPROTECT)
38660+
38661+#ifdef CONFIG_PAX_MPROTECT
38662+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
38663+#endif
38664+
38665+ )
38666+ {
38667+ *flags &= ~MF_PAX_MPROTECT;
38668+ retval = -EINVAL;
38669+ }
38670+
38671+ if ((*flags & MF_PAX_EMUTRAMP)
38672+
38673+#ifdef CONFIG_PAX_EMUTRAMP
38674+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
38675+#endif
38676+
38677+ )
38678+ {
38679+ *flags &= ~MF_PAX_EMUTRAMP;
38680+ retval = -EINVAL;
38681+ }
38682+
38683+ return retval;
38684+}
38685+
38686+EXPORT_SYMBOL(pax_check_flags);
38687+
38688+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
38689+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
38690+{
38691+ struct task_struct *tsk = current;
38692+ struct mm_struct *mm = current->mm;
38693+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
38694+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
38695+ char *path_exec = NULL;
38696+ char *path_fault = NULL;
38697+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
38698+
38699+ if (buffer_exec && buffer_fault) {
38700+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
38701+
38702+ down_read(&mm->mmap_sem);
38703+ vma = mm->mmap;
38704+ while (vma && (!vma_exec || !vma_fault)) {
38705+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
38706+ vma_exec = vma;
38707+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
38708+ vma_fault = vma;
38709+ vma = vma->vm_next;
38710+ }
38711+ if (vma_exec) {
38712+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
38713+ if (IS_ERR(path_exec))
38714+ path_exec = "<path too long>";
38715+ else {
38716+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
38717+ if (path_exec) {
38718+ *path_exec = 0;
38719+ path_exec = buffer_exec;
38720+ } else
38721+ path_exec = "<path too long>";
38722+ }
38723+ }
38724+ if (vma_fault) {
38725+ start = vma_fault->vm_start;
38726+ end = vma_fault->vm_end;
38727+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
38728+ if (vma_fault->vm_file) {
38729+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
38730+ if (IS_ERR(path_fault))
38731+ path_fault = "<path too long>";
38732+ else {
38733+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
38734+ if (path_fault) {
38735+ *path_fault = 0;
38736+ path_fault = buffer_fault;
38737+ } else
38738+ path_fault = "<path too long>";
38739+ }
38740+ } else
38741+ path_fault = "<anonymous mapping>";
38742+ }
38743+ up_read(&mm->mmap_sem);
38744+ }
38745+ if (tsk->signal->curr_ip)
38746+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
38747+ else
38748+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
38749+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
38750+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
38751+ task_uid(tsk), task_euid(tsk), pc, sp);
38752+ free_page((unsigned long)buffer_exec);
38753+ free_page((unsigned long)buffer_fault);
38754+ pax_report_insns(pc, sp);
38755+ do_coredump(SIGKILL, SIGKILL, regs);
38756+}
38757+#endif
38758+
38759+#ifdef CONFIG_PAX_REFCOUNT
38760+void pax_report_refcount_overflow(struct pt_regs *regs)
38761+{
38762+ if (current->signal->curr_ip)
38763+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
38764+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
38765+ else
38766+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
38767+ current->comm, task_pid_nr(current), current_uid(), current_euid());
38768+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
38769+ show_regs(regs);
38770+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
38771+}
38772+#endif
38773+
38774+#ifdef CONFIG_PAX_USERCOPY
38775+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
38776+int object_is_on_stack(const void *obj, unsigned long len)
38777+{
38778+ const void * const stack = task_stack_page(current);
38779+ const void * const stackend = stack + THREAD_SIZE;
38780+
38781+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
38782+ const void *frame = NULL;
38783+ const void *oldframe;
38784+#endif
38785+
38786+ if (obj + len < obj)
38787+ return -1;
38788+
38789+ if (obj + len <= stack || stackend <= obj)
38790+ return 0;
38791+
38792+ if (obj < stack || stackend < obj + len)
38793+ return -1;
38794+
38795+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
38796+ oldframe = __builtin_frame_address(1);
38797+ if (oldframe)
38798+ frame = __builtin_frame_address(2);
38799+ /*
38800+ low ----------------------------------------------> high
38801+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
38802+ ^----------------^
38803+ allow copies only within here
38804+ */
38805+ while (stack <= frame && frame < stackend) {
38806+ /* if obj + len extends past the last frame, this
38807+ check won't pass and the next frame will be 0,
38808+ causing us to bail out and correctly report
38809+ the copy as invalid
38810+ */
38811+ if (obj + len <= frame)
38812+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
38813+ oldframe = frame;
38814+ frame = *(const void * const *)frame;
38815+ }
38816+ return -1;
38817+#else
38818+ return 1;
38819+#endif
38820+}
38821+
38822+
38823+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
38824+{
38825+ if (current->signal->curr_ip)
38826+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
38827+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
38828+ else
38829+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
38830+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
38831+ dump_stack();
38832+ gr_handle_kernel_exploit();
38833+ do_group_exit(SIGKILL);
38834+}
38835+#endif
38836+
38837+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
38838+void pax_track_stack(void)
38839+{
38840+ unsigned long sp = (unsigned long)&sp;
38841+ if (sp < current_thread_info()->lowest_stack &&
38842+ sp > (unsigned long)task_stack_page(current))
38843+ current_thread_info()->lowest_stack = sp;
38844+}
38845+EXPORT_SYMBOL(pax_track_stack);
38846+#endif
38847+
38848 static int zap_process(struct task_struct *start, int exit_code)
38849 {
38850 struct task_struct *t;
38851@@ -1969,17 +2272,17 @@ static void wait_for_dump_helpers(struct
38852 pipe = file->f_path.dentry->d_inode->i_pipe;
38853
38854 pipe_lock(pipe);
38855- pipe->readers++;
38856- pipe->writers--;
38857+ atomic_inc(&pipe->readers);
38858+ atomic_dec(&pipe->writers);
38859
38860- while ((pipe->readers > 1) && (!signal_pending(current))) {
38861+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
38862 wake_up_interruptible_sync(&pipe->wait);
38863 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
38864 pipe_wait(pipe);
38865 }
38866
38867- pipe->readers--;
38868- pipe->writers++;
38869+ atomic_dec(&pipe->readers);
38870+ atomic_inc(&pipe->writers);
38871 pipe_unlock(pipe);
38872
38873 }
38874@@ -2040,7 +2343,7 @@ void do_coredump(long signr, int exit_co
38875 int retval = 0;
38876 int flag = 0;
38877 int ispipe;
38878- static atomic_t core_dump_count = ATOMIC_INIT(0);
38879+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
38880 struct coredump_params cprm = {
38881 .signr = signr,
38882 .regs = regs,
38883@@ -2055,6 +2358,9 @@ void do_coredump(long signr, int exit_co
38884
38885 audit_core_dumps(signr);
38886
38887+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
38888+ gr_handle_brute_attach(current, cprm.mm_flags);
38889+
38890 binfmt = mm->binfmt;
38891 if (!binfmt || !binfmt->core_dump)
38892 goto fail;
38893@@ -2095,6 +2401,8 @@ void do_coredump(long signr, int exit_co
38894 goto fail_corename;
38895 }
38896
38897+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
38898+
38899 if (ispipe) {
38900 int dump_count;
38901 char **helper_argv;
38902@@ -2122,7 +2430,7 @@ void do_coredump(long signr, int exit_co
38903 }
38904 cprm.limit = RLIM_INFINITY;
38905
38906- dump_count = atomic_inc_return(&core_dump_count);
38907+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
38908 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
38909 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
38910 task_tgid_vnr(current), current->comm);
38911@@ -2192,7 +2500,7 @@ close_fail:
38912 filp_close(cprm.file, NULL);
38913 fail_dropcount:
38914 if (ispipe)
38915- atomic_dec(&core_dump_count);
38916+ atomic_dec_unchecked(&core_dump_count);
38917 fail_unlock:
38918 kfree(cn.corename);
38919 fail_corename:
38920diff -urNp linux-3.0.4/fs/ext2/balloc.c linux-3.0.4/fs/ext2/balloc.c
38921--- linux-3.0.4/fs/ext2/balloc.c 2011-07-21 22:17:23.000000000 -0400
38922+++ linux-3.0.4/fs/ext2/balloc.c 2011-08-23 21:48:14.000000000 -0400
38923@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
38924
38925 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
38926 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
38927- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
38928+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
38929 sbi->s_resuid != current_fsuid() &&
38930 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
38931 return 0;
38932diff -urNp linux-3.0.4/fs/ext3/balloc.c linux-3.0.4/fs/ext3/balloc.c
38933--- linux-3.0.4/fs/ext3/balloc.c 2011-07-21 22:17:23.000000000 -0400
38934+++ linux-3.0.4/fs/ext3/balloc.c 2011-08-23 21:48:14.000000000 -0400
38935@@ -1441,7 +1441,7 @@ static int ext3_has_free_blocks(struct e
38936
38937 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
38938 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
38939- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
38940+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
38941 sbi->s_resuid != current_fsuid() &&
38942 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
38943 return 0;
38944diff -urNp linux-3.0.4/fs/ext4/balloc.c linux-3.0.4/fs/ext4/balloc.c
38945--- linux-3.0.4/fs/ext4/balloc.c 2011-07-21 22:17:23.000000000 -0400
38946+++ linux-3.0.4/fs/ext4/balloc.c 2011-08-23 21:48:14.000000000 -0400
38947@@ -394,8 +394,8 @@ static int ext4_has_free_blocks(struct e
38948 /* Hm, nope. Are (enough) root reserved blocks available? */
38949 if (sbi->s_resuid == current_fsuid() ||
38950 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
38951- capable(CAP_SYS_RESOURCE) ||
38952- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
38953+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
38954+ capable_nolog(CAP_SYS_RESOURCE)) {
38955
38956 if (free_blocks >= (nblocks + dirty_blocks))
38957 return 1;
38958diff -urNp linux-3.0.4/fs/ext4/ext4.h linux-3.0.4/fs/ext4/ext4.h
38959--- linux-3.0.4/fs/ext4/ext4.h 2011-08-23 21:44:40.000000000 -0400
38960+++ linux-3.0.4/fs/ext4/ext4.h 2011-08-23 21:47:56.000000000 -0400
38961@@ -1177,19 +1177,19 @@ struct ext4_sb_info {
38962 unsigned long s_mb_last_start;
38963
38964 /* stats for buddy allocator */
38965- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
38966- atomic_t s_bal_success; /* we found long enough chunks */
38967- atomic_t s_bal_allocated; /* in blocks */
38968- atomic_t s_bal_ex_scanned; /* total extents scanned */
38969- atomic_t s_bal_goals; /* goal hits */
38970- atomic_t s_bal_breaks; /* too long searches */
38971- atomic_t s_bal_2orders; /* 2^order hits */
38972+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
38973+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
38974+ atomic_unchecked_t s_bal_allocated; /* in blocks */
38975+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
38976+ atomic_unchecked_t s_bal_goals; /* goal hits */
38977+ atomic_unchecked_t s_bal_breaks; /* too long searches */
38978+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
38979 spinlock_t s_bal_lock;
38980 unsigned long s_mb_buddies_generated;
38981 unsigned long long s_mb_generation_time;
38982- atomic_t s_mb_lost_chunks;
38983- atomic_t s_mb_preallocated;
38984- atomic_t s_mb_discarded;
38985+ atomic_unchecked_t s_mb_lost_chunks;
38986+ atomic_unchecked_t s_mb_preallocated;
38987+ atomic_unchecked_t s_mb_discarded;
38988 atomic_t s_lock_busy;
38989
38990 /* locality groups */
38991diff -urNp linux-3.0.4/fs/ext4/mballoc.c linux-3.0.4/fs/ext4/mballoc.c
38992--- linux-3.0.4/fs/ext4/mballoc.c 2011-08-23 21:44:40.000000000 -0400
38993+++ linux-3.0.4/fs/ext4/mballoc.c 2011-08-23 21:48:14.000000000 -0400
38994@@ -1793,7 +1793,7 @@ void ext4_mb_simple_scan_group(struct ex
38995 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
38996
38997 if (EXT4_SB(sb)->s_mb_stats)
38998- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
38999+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
39000
39001 break;
39002 }
39003@@ -2087,7 +2087,7 @@ repeat:
39004 ac->ac_status = AC_STATUS_CONTINUE;
39005 ac->ac_flags |= EXT4_MB_HINT_FIRST;
39006 cr = 3;
39007- atomic_inc(&sbi->s_mb_lost_chunks);
39008+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
39009 goto repeat;
39010 }
39011 }
39012@@ -2130,6 +2130,8 @@ static int ext4_mb_seq_groups_show(struc
39013 ext4_grpblk_t counters[16];
39014 } sg;
39015
39016+ pax_track_stack();
39017+
39018 group--;
39019 if (group == 0)
39020 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
39021@@ -2553,25 +2555,25 @@ int ext4_mb_release(struct super_block *
39022 if (sbi->s_mb_stats) {
39023 printk(KERN_INFO
39024 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
39025- atomic_read(&sbi->s_bal_allocated),
39026- atomic_read(&sbi->s_bal_reqs),
39027- atomic_read(&sbi->s_bal_success));
39028+ atomic_read_unchecked(&sbi->s_bal_allocated),
39029+ atomic_read_unchecked(&sbi->s_bal_reqs),
39030+ atomic_read_unchecked(&sbi->s_bal_success));
39031 printk(KERN_INFO
39032 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
39033 "%u 2^N hits, %u breaks, %u lost\n",
39034- atomic_read(&sbi->s_bal_ex_scanned),
39035- atomic_read(&sbi->s_bal_goals),
39036- atomic_read(&sbi->s_bal_2orders),
39037- atomic_read(&sbi->s_bal_breaks),
39038- atomic_read(&sbi->s_mb_lost_chunks));
39039+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
39040+ atomic_read_unchecked(&sbi->s_bal_goals),
39041+ atomic_read_unchecked(&sbi->s_bal_2orders),
39042+ atomic_read_unchecked(&sbi->s_bal_breaks),
39043+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
39044 printk(KERN_INFO
39045 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
39046 sbi->s_mb_buddies_generated++,
39047 sbi->s_mb_generation_time);
39048 printk(KERN_INFO
39049 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
39050- atomic_read(&sbi->s_mb_preallocated),
39051- atomic_read(&sbi->s_mb_discarded));
39052+ atomic_read_unchecked(&sbi->s_mb_preallocated),
39053+ atomic_read_unchecked(&sbi->s_mb_discarded));
39054 }
39055
39056 free_percpu(sbi->s_locality_groups);
39057@@ -3041,16 +3043,16 @@ static void ext4_mb_collect_stats(struct
39058 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
39059
39060 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
39061- atomic_inc(&sbi->s_bal_reqs);
39062- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
39063+ atomic_inc_unchecked(&sbi->s_bal_reqs);
39064+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
39065 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
39066- atomic_inc(&sbi->s_bal_success);
39067- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
39068+ atomic_inc_unchecked(&sbi->s_bal_success);
39069+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
39070 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
39071 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
39072- atomic_inc(&sbi->s_bal_goals);
39073+ atomic_inc_unchecked(&sbi->s_bal_goals);
39074 if (ac->ac_found > sbi->s_mb_max_to_scan)
39075- atomic_inc(&sbi->s_bal_breaks);
39076+ atomic_inc_unchecked(&sbi->s_bal_breaks);
39077 }
39078
39079 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
39080@@ -3448,7 +3450,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
39081 trace_ext4_mb_new_inode_pa(ac, pa);
39082
39083 ext4_mb_use_inode_pa(ac, pa);
39084- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39085+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39086
39087 ei = EXT4_I(ac->ac_inode);
39088 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
39089@@ -3508,7 +3510,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
39090 trace_ext4_mb_new_group_pa(ac, pa);
39091
39092 ext4_mb_use_group_pa(ac, pa);
39093- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39094+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39095
39096 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
39097 lg = ac->ac_lg;
39098@@ -3595,7 +3597,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
39099 * from the bitmap and continue.
39100 */
39101 }
39102- atomic_add(free, &sbi->s_mb_discarded);
39103+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
39104
39105 return err;
39106 }
39107@@ -3613,7 +3615,7 @@ ext4_mb_release_group_pa(struct ext4_bud
39108 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
39109 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
39110 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
39111- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
39112+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
39113 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
39114
39115 return 0;
39116diff -urNp linux-3.0.4/fs/fcntl.c linux-3.0.4/fs/fcntl.c
39117--- linux-3.0.4/fs/fcntl.c 2011-07-21 22:17:23.000000000 -0400
39118+++ linux-3.0.4/fs/fcntl.c 2011-08-23 21:48:14.000000000 -0400
39119@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct
39120 if (err)
39121 return err;
39122
39123+ if (gr_handle_chroot_fowner(pid, type))
39124+ return -ENOENT;
39125+ if (gr_check_protected_task_fowner(pid, type))
39126+ return -EACCES;
39127+
39128 f_modown(filp, pid, type, force);
39129 return 0;
39130 }
39131@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned in
39132 switch (cmd) {
39133 case F_DUPFD:
39134 case F_DUPFD_CLOEXEC:
39135+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
39136 if (arg >= rlimit(RLIMIT_NOFILE))
39137 break;
39138 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
39139@@ -835,14 +841,14 @@ static int __init fcntl_init(void)
39140 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
39141 * is defined as O_NONBLOCK on some platforms and not on others.
39142 */
39143- BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
39144+ BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
39145 O_RDONLY | O_WRONLY | O_RDWR |
39146 O_CREAT | O_EXCL | O_NOCTTY |
39147 O_TRUNC | O_APPEND | /* O_NONBLOCK | */
39148 __O_SYNC | O_DSYNC | FASYNC |
39149 O_DIRECT | O_LARGEFILE | O_DIRECTORY |
39150 O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
39151- __FMODE_EXEC | O_PATH
39152+ __FMODE_EXEC | O_PATH | FMODE_GREXEC
39153 ));
39154
39155 fasync_cache = kmem_cache_create("fasync_cache",
39156diff -urNp linux-3.0.4/fs/fifo.c linux-3.0.4/fs/fifo.c
39157--- linux-3.0.4/fs/fifo.c 2011-07-21 22:17:23.000000000 -0400
39158+++ linux-3.0.4/fs/fifo.c 2011-08-23 21:47:56.000000000 -0400
39159@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
39160 */
39161 filp->f_op = &read_pipefifo_fops;
39162 pipe->r_counter++;
39163- if (pipe->readers++ == 0)
39164+ if (atomic_inc_return(&pipe->readers) == 1)
39165 wake_up_partner(inode);
39166
39167- if (!pipe->writers) {
39168+ if (!atomic_read(&pipe->writers)) {
39169 if ((filp->f_flags & O_NONBLOCK)) {
39170 /* suppress POLLHUP until we have
39171 * seen a writer */
39172@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode
39173 * errno=ENXIO when there is no process reading the FIFO.
39174 */
39175 ret = -ENXIO;
39176- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
39177+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
39178 goto err;
39179
39180 filp->f_op = &write_pipefifo_fops;
39181 pipe->w_counter++;
39182- if (!pipe->writers++)
39183+ if (atomic_inc_return(&pipe->writers) == 1)
39184 wake_up_partner(inode);
39185
39186- if (!pipe->readers) {
39187+ if (!atomic_read(&pipe->readers)) {
39188 wait_for_partner(inode, &pipe->r_counter);
39189 if (signal_pending(current))
39190 goto err_wr;
39191@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode
39192 */
39193 filp->f_op = &rdwr_pipefifo_fops;
39194
39195- pipe->readers++;
39196- pipe->writers++;
39197+ atomic_inc(&pipe->readers);
39198+ atomic_inc(&pipe->writers);
39199 pipe->r_counter++;
39200 pipe->w_counter++;
39201- if (pipe->readers == 1 || pipe->writers == 1)
39202+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
39203 wake_up_partner(inode);
39204 break;
39205
39206@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode
39207 return 0;
39208
39209 err_rd:
39210- if (!--pipe->readers)
39211+ if (atomic_dec_and_test(&pipe->readers))
39212 wake_up_interruptible(&pipe->wait);
39213 ret = -ERESTARTSYS;
39214 goto err;
39215
39216 err_wr:
39217- if (!--pipe->writers)
39218+ if (atomic_dec_and_test(&pipe->writers))
39219 wake_up_interruptible(&pipe->wait);
39220 ret = -ERESTARTSYS;
39221 goto err;
39222
39223 err:
39224- if (!pipe->readers && !pipe->writers)
39225+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
39226 free_pipe_info(inode);
39227
39228 err_nocleanup:
39229diff -urNp linux-3.0.4/fs/file.c linux-3.0.4/fs/file.c
39230--- linux-3.0.4/fs/file.c 2011-07-21 22:17:23.000000000 -0400
39231+++ linux-3.0.4/fs/file.c 2011-08-23 21:48:14.000000000 -0400
39232@@ -15,6 +15,7 @@
39233 #include <linux/slab.h>
39234 #include <linux/vmalloc.h>
39235 #include <linux/file.h>
39236+#include <linux/security.h>
39237 #include <linux/fdtable.h>
39238 #include <linux/bitops.h>
39239 #include <linux/interrupt.h>
39240@@ -254,6 +255,7 @@ int expand_files(struct files_struct *fi
39241 * N.B. For clone tasks sharing a files structure, this test
39242 * will limit the total number of files that can be opened.
39243 */
39244+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
39245 if (nr >= rlimit(RLIMIT_NOFILE))
39246 return -EMFILE;
39247
39248diff -urNp linux-3.0.4/fs/filesystems.c linux-3.0.4/fs/filesystems.c
39249--- linux-3.0.4/fs/filesystems.c 2011-07-21 22:17:23.000000000 -0400
39250+++ linux-3.0.4/fs/filesystems.c 2011-08-23 21:48:14.000000000 -0400
39251@@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(con
39252 int len = dot ? dot - name : strlen(name);
39253
39254 fs = __get_fs_type(name, len);
39255+
39256+#ifdef CONFIG_GRKERNSEC_MODHARDEN
39257+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
39258+#else
39259 if (!fs && (request_module("%.*s", len, name) == 0))
39260+#endif
39261 fs = __get_fs_type(name, len);
39262
39263 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
39264diff -urNp linux-3.0.4/fs/fscache/cookie.c linux-3.0.4/fs/fscache/cookie.c
39265--- linux-3.0.4/fs/fscache/cookie.c 2011-07-21 22:17:23.000000000 -0400
39266+++ linux-3.0.4/fs/fscache/cookie.c 2011-08-23 21:47:56.000000000 -0400
39267@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
39268 parent ? (char *) parent->def->name : "<no-parent>",
39269 def->name, netfs_data);
39270
39271- fscache_stat(&fscache_n_acquires);
39272+ fscache_stat_unchecked(&fscache_n_acquires);
39273
39274 /* if there's no parent cookie, then we don't create one here either */
39275 if (!parent) {
39276- fscache_stat(&fscache_n_acquires_null);
39277+ fscache_stat_unchecked(&fscache_n_acquires_null);
39278 _leave(" [no parent]");
39279 return NULL;
39280 }
39281@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
39282 /* allocate and initialise a cookie */
39283 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
39284 if (!cookie) {
39285- fscache_stat(&fscache_n_acquires_oom);
39286+ fscache_stat_unchecked(&fscache_n_acquires_oom);
39287 _leave(" [ENOMEM]");
39288 return NULL;
39289 }
39290@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
39291
39292 switch (cookie->def->type) {
39293 case FSCACHE_COOKIE_TYPE_INDEX:
39294- fscache_stat(&fscache_n_cookie_index);
39295+ fscache_stat_unchecked(&fscache_n_cookie_index);
39296 break;
39297 case FSCACHE_COOKIE_TYPE_DATAFILE:
39298- fscache_stat(&fscache_n_cookie_data);
39299+ fscache_stat_unchecked(&fscache_n_cookie_data);
39300 break;
39301 default:
39302- fscache_stat(&fscache_n_cookie_special);
39303+ fscache_stat_unchecked(&fscache_n_cookie_special);
39304 break;
39305 }
39306
39307@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
39308 if (fscache_acquire_non_index_cookie(cookie) < 0) {
39309 atomic_dec(&parent->n_children);
39310 __fscache_cookie_put(cookie);
39311- fscache_stat(&fscache_n_acquires_nobufs);
39312+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
39313 _leave(" = NULL");
39314 return NULL;
39315 }
39316 }
39317
39318- fscache_stat(&fscache_n_acquires_ok);
39319+ fscache_stat_unchecked(&fscache_n_acquires_ok);
39320 _leave(" = %p", cookie);
39321 return cookie;
39322 }
39323@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
39324 cache = fscache_select_cache_for_object(cookie->parent);
39325 if (!cache) {
39326 up_read(&fscache_addremove_sem);
39327- fscache_stat(&fscache_n_acquires_no_cache);
39328+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
39329 _leave(" = -ENOMEDIUM [no cache]");
39330 return -ENOMEDIUM;
39331 }
39332@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
39333 object = cache->ops->alloc_object(cache, cookie);
39334 fscache_stat_d(&fscache_n_cop_alloc_object);
39335 if (IS_ERR(object)) {
39336- fscache_stat(&fscache_n_object_no_alloc);
39337+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
39338 ret = PTR_ERR(object);
39339 goto error;
39340 }
39341
39342- fscache_stat(&fscache_n_object_alloc);
39343+ fscache_stat_unchecked(&fscache_n_object_alloc);
39344
39345 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
39346
39347@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
39348 struct fscache_object *object;
39349 struct hlist_node *_p;
39350
39351- fscache_stat(&fscache_n_updates);
39352+ fscache_stat_unchecked(&fscache_n_updates);
39353
39354 if (!cookie) {
39355- fscache_stat(&fscache_n_updates_null);
39356+ fscache_stat_unchecked(&fscache_n_updates_null);
39357 _leave(" [no cookie]");
39358 return;
39359 }
39360@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
39361 struct fscache_object *object;
39362 unsigned long event;
39363
39364- fscache_stat(&fscache_n_relinquishes);
39365+ fscache_stat_unchecked(&fscache_n_relinquishes);
39366 if (retire)
39367- fscache_stat(&fscache_n_relinquishes_retire);
39368+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
39369
39370 if (!cookie) {
39371- fscache_stat(&fscache_n_relinquishes_null);
39372+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
39373 _leave(" [no cookie]");
39374 return;
39375 }
39376@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
39377
39378 /* wait for the cookie to finish being instantiated (or to fail) */
39379 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
39380- fscache_stat(&fscache_n_relinquishes_waitcrt);
39381+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
39382 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
39383 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
39384 }
39385diff -urNp linux-3.0.4/fs/fscache/internal.h linux-3.0.4/fs/fscache/internal.h
39386--- linux-3.0.4/fs/fscache/internal.h 2011-07-21 22:17:23.000000000 -0400
39387+++ linux-3.0.4/fs/fscache/internal.h 2011-08-23 21:47:56.000000000 -0400
39388@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
39389 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
39390 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
39391
39392-extern atomic_t fscache_n_op_pend;
39393-extern atomic_t fscache_n_op_run;
39394-extern atomic_t fscache_n_op_enqueue;
39395-extern atomic_t fscache_n_op_deferred_release;
39396-extern atomic_t fscache_n_op_release;
39397-extern atomic_t fscache_n_op_gc;
39398-extern atomic_t fscache_n_op_cancelled;
39399-extern atomic_t fscache_n_op_rejected;
39400-
39401-extern atomic_t fscache_n_attr_changed;
39402-extern atomic_t fscache_n_attr_changed_ok;
39403-extern atomic_t fscache_n_attr_changed_nobufs;
39404-extern atomic_t fscache_n_attr_changed_nomem;
39405-extern atomic_t fscache_n_attr_changed_calls;
39406-
39407-extern atomic_t fscache_n_allocs;
39408-extern atomic_t fscache_n_allocs_ok;
39409-extern atomic_t fscache_n_allocs_wait;
39410-extern atomic_t fscache_n_allocs_nobufs;
39411-extern atomic_t fscache_n_allocs_intr;
39412-extern atomic_t fscache_n_allocs_object_dead;
39413-extern atomic_t fscache_n_alloc_ops;
39414-extern atomic_t fscache_n_alloc_op_waits;
39415-
39416-extern atomic_t fscache_n_retrievals;
39417-extern atomic_t fscache_n_retrievals_ok;
39418-extern atomic_t fscache_n_retrievals_wait;
39419-extern atomic_t fscache_n_retrievals_nodata;
39420-extern atomic_t fscache_n_retrievals_nobufs;
39421-extern atomic_t fscache_n_retrievals_intr;
39422-extern atomic_t fscache_n_retrievals_nomem;
39423-extern atomic_t fscache_n_retrievals_object_dead;
39424-extern atomic_t fscache_n_retrieval_ops;
39425-extern atomic_t fscache_n_retrieval_op_waits;
39426-
39427-extern atomic_t fscache_n_stores;
39428-extern atomic_t fscache_n_stores_ok;
39429-extern atomic_t fscache_n_stores_again;
39430-extern atomic_t fscache_n_stores_nobufs;
39431-extern atomic_t fscache_n_stores_oom;
39432-extern atomic_t fscache_n_store_ops;
39433-extern atomic_t fscache_n_store_calls;
39434-extern atomic_t fscache_n_store_pages;
39435-extern atomic_t fscache_n_store_radix_deletes;
39436-extern atomic_t fscache_n_store_pages_over_limit;
39437-
39438-extern atomic_t fscache_n_store_vmscan_not_storing;
39439-extern atomic_t fscache_n_store_vmscan_gone;
39440-extern atomic_t fscache_n_store_vmscan_busy;
39441-extern atomic_t fscache_n_store_vmscan_cancelled;
39442-
39443-extern atomic_t fscache_n_marks;
39444-extern atomic_t fscache_n_uncaches;
39445-
39446-extern atomic_t fscache_n_acquires;
39447-extern atomic_t fscache_n_acquires_null;
39448-extern atomic_t fscache_n_acquires_no_cache;
39449-extern atomic_t fscache_n_acquires_ok;
39450-extern atomic_t fscache_n_acquires_nobufs;
39451-extern atomic_t fscache_n_acquires_oom;
39452-
39453-extern atomic_t fscache_n_updates;
39454-extern atomic_t fscache_n_updates_null;
39455-extern atomic_t fscache_n_updates_run;
39456-
39457-extern atomic_t fscache_n_relinquishes;
39458-extern atomic_t fscache_n_relinquishes_null;
39459-extern atomic_t fscache_n_relinquishes_waitcrt;
39460-extern atomic_t fscache_n_relinquishes_retire;
39461-
39462-extern atomic_t fscache_n_cookie_index;
39463-extern atomic_t fscache_n_cookie_data;
39464-extern atomic_t fscache_n_cookie_special;
39465-
39466-extern atomic_t fscache_n_object_alloc;
39467-extern atomic_t fscache_n_object_no_alloc;
39468-extern atomic_t fscache_n_object_lookups;
39469-extern atomic_t fscache_n_object_lookups_negative;
39470-extern atomic_t fscache_n_object_lookups_positive;
39471-extern atomic_t fscache_n_object_lookups_timed_out;
39472-extern atomic_t fscache_n_object_created;
39473-extern atomic_t fscache_n_object_avail;
39474-extern atomic_t fscache_n_object_dead;
39475-
39476-extern atomic_t fscache_n_checkaux_none;
39477-extern atomic_t fscache_n_checkaux_okay;
39478-extern atomic_t fscache_n_checkaux_update;
39479-extern atomic_t fscache_n_checkaux_obsolete;
39480+extern atomic_unchecked_t fscache_n_op_pend;
39481+extern atomic_unchecked_t fscache_n_op_run;
39482+extern atomic_unchecked_t fscache_n_op_enqueue;
39483+extern atomic_unchecked_t fscache_n_op_deferred_release;
39484+extern atomic_unchecked_t fscache_n_op_release;
39485+extern atomic_unchecked_t fscache_n_op_gc;
39486+extern atomic_unchecked_t fscache_n_op_cancelled;
39487+extern atomic_unchecked_t fscache_n_op_rejected;
39488+
39489+extern atomic_unchecked_t fscache_n_attr_changed;
39490+extern atomic_unchecked_t fscache_n_attr_changed_ok;
39491+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
39492+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
39493+extern atomic_unchecked_t fscache_n_attr_changed_calls;
39494+
39495+extern atomic_unchecked_t fscache_n_allocs;
39496+extern atomic_unchecked_t fscache_n_allocs_ok;
39497+extern atomic_unchecked_t fscache_n_allocs_wait;
39498+extern atomic_unchecked_t fscache_n_allocs_nobufs;
39499+extern atomic_unchecked_t fscache_n_allocs_intr;
39500+extern atomic_unchecked_t fscache_n_allocs_object_dead;
39501+extern atomic_unchecked_t fscache_n_alloc_ops;
39502+extern atomic_unchecked_t fscache_n_alloc_op_waits;
39503+
39504+extern atomic_unchecked_t fscache_n_retrievals;
39505+extern atomic_unchecked_t fscache_n_retrievals_ok;
39506+extern atomic_unchecked_t fscache_n_retrievals_wait;
39507+extern atomic_unchecked_t fscache_n_retrievals_nodata;
39508+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
39509+extern atomic_unchecked_t fscache_n_retrievals_intr;
39510+extern atomic_unchecked_t fscache_n_retrievals_nomem;
39511+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
39512+extern atomic_unchecked_t fscache_n_retrieval_ops;
39513+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
39514+
39515+extern atomic_unchecked_t fscache_n_stores;
39516+extern atomic_unchecked_t fscache_n_stores_ok;
39517+extern atomic_unchecked_t fscache_n_stores_again;
39518+extern atomic_unchecked_t fscache_n_stores_nobufs;
39519+extern atomic_unchecked_t fscache_n_stores_oom;
39520+extern atomic_unchecked_t fscache_n_store_ops;
39521+extern atomic_unchecked_t fscache_n_store_calls;
39522+extern atomic_unchecked_t fscache_n_store_pages;
39523+extern atomic_unchecked_t fscache_n_store_radix_deletes;
39524+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
39525+
39526+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
39527+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
39528+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
39529+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
39530+
39531+extern atomic_unchecked_t fscache_n_marks;
39532+extern atomic_unchecked_t fscache_n_uncaches;
39533+
39534+extern atomic_unchecked_t fscache_n_acquires;
39535+extern atomic_unchecked_t fscache_n_acquires_null;
39536+extern atomic_unchecked_t fscache_n_acquires_no_cache;
39537+extern atomic_unchecked_t fscache_n_acquires_ok;
39538+extern atomic_unchecked_t fscache_n_acquires_nobufs;
39539+extern atomic_unchecked_t fscache_n_acquires_oom;
39540+
39541+extern atomic_unchecked_t fscache_n_updates;
39542+extern atomic_unchecked_t fscache_n_updates_null;
39543+extern atomic_unchecked_t fscache_n_updates_run;
39544+
39545+extern atomic_unchecked_t fscache_n_relinquishes;
39546+extern atomic_unchecked_t fscache_n_relinquishes_null;
39547+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
39548+extern atomic_unchecked_t fscache_n_relinquishes_retire;
39549+
39550+extern atomic_unchecked_t fscache_n_cookie_index;
39551+extern atomic_unchecked_t fscache_n_cookie_data;
39552+extern atomic_unchecked_t fscache_n_cookie_special;
39553+
39554+extern atomic_unchecked_t fscache_n_object_alloc;
39555+extern atomic_unchecked_t fscache_n_object_no_alloc;
39556+extern atomic_unchecked_t fscache_n_object_lookups;
39557+extern atomic_unchecked_t fscache_n_object_lookups_negative;
39558+extern atomic_unchecked_t fscache_n_object_lookups_positive;
39559+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
39560+extern atomic_unchecked_t fscache_n_object_created;
39561+extern atomic_unchecked_t fscache_n_object_avail;
39562+extern atomic_unchecked_t fscache_n_object_dead;
39563+
39564+extern atomic_unchecked_t fscache_n_checkaux_none;
39565+extern atomic_unchecked_t fscache_n_checkaux_okay;
39566+extern atomic_unchecked_t fscache_n_checkaux_update;
39567+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
39568
39569 extern atomic_t fscache_n_cop_alloc_object;
39570 extern atomic_t fscache_n_cop_lookup_object;
39571@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t
39572 atomic_inc(stat);
39573 }
39574
39575+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
39576+{
39577+ atomic_inc_unchecked(stat);
39578+}
39579+
39580 static inline void fscache_stat_d(atomic_t *stat)
39581 {
39582 atomic_dec(stat);
39583@@ -267,6 +272,7 @@ extern const struct file_operations fsca
39584
39585 #define __fscache_stat(stat) (NULL)
39586 #define fscache_stat(stat) do {} while (0)
39587+#define fscache_stat_unchecked(stat) do {} while (0)
39588 #define fscache_stat_d(stat) do {} while (0)
39589 #endif
39590
39591diff -urNp linux-3.0.4/fs/fscache/object.c linux-3.0.4/fs/fscache/object.c
39592--- linux-3.0.4/fs/fscache/object.c 2011-07-21 22:17:23.000000000 -0400
39593+++ linux-3.0.4/fs/fscache/object.c 2011-08-23 21:47:56.000000000 -0400
39594@@ -128,7 +128,7 @@ static void fscache_object_state_machine
39595 /* update the object metadata on disk */
39596 case FSCACHE_OBJECT_UPDATING:
39597 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
39598- fscache_stat(&fscache_n_updates_run);
39599+ fscache_stat_unchecked(&fscache_n_updates_run);
39600 fscache_stat(&fscache_n_cop_update_object);
39601 object->cache->ops->update_object(object);
39602 fscache_stat_d(&fscache_n_cop_update_object);
39603@@ -217,7 +217,7 @@ static void fscache_object_state_machine
39604 spin_lock(&object->lock);
39605 object->state = FSCACHE_OBJECT_DEAD;
39606 spin_unlock(&object->lock);
39607- fscache_stat(&fscache_n_object_dead);
39608+ fscache_stat_unchecked(&fscache_n_object_dead);
39609 goto terminal_transit;
39610
39611 /* handle the parent cache of this object being withdrawn from
39612@@ -232,7 +232,7 @@ static void fscache_object_state_machine
39613 spin_lock(&object->lock);
39614 object->state = FSCACHE_OBJECT_DEAD;
39615 spin_unlock(&object->lock);
39616- fscache_stat(&fscache_n_object_dead);
39617+ fscache_stat_unchecked(&fscache_n_object_dead);
39618 goto terminal_transit;
39619
39620 /* complain about the object being woken up once it is
39621@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct
39622 parent->cookie->def->name, cookie->def->name,
39623 object->cache->tag->name);
39624
39625- fscache_stat(&fscache_n_object_lookups);
39626+ fscache_stat_unchecked(&fscache_n_object_lookups);
39627 fscache_stat(&fscache_n_cop_lookup_object);
39628 ret = object->cache->ops->lookup_object(object);
39629 fscache_stat_d(&fscache_n_cop_lookup_object);
39630@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct
39631 if (ret == -ETIMEDOUT) {
39632 /* probably stuck behind another object, so move this one to
39633 * the back of the queue */
39634- fscache_stat(&fscache_n_object_lookups_timed_out);
39635+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
39636 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
39637 }
39638
39639@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(stru
39640
39641 spin_lock(&object->lock);
39642 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
39643- fscache_stat(&fscache_n_object_lookups_negative);
39644+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
39645
39646 /* transit here to allow write requests to begin stacking up
39647 * and read requests to begin returning ENODATA */
39648@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fsca
39649 * result, in which case there may be data available */
39650 spin_lock(&object->lock);
39651 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
39652- fscache_stat(&fscache_n_object_lookups_positive);
39653+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
39654
39655 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
39656
39657@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fsca
39658 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
39659 } else {
39660 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
39661- fscache_stat(&fscache_n_object_created);
39662+ fscache_stat_unchecked(&fscache_n_object_created);
39663
39664 object->state = FSCACHE_OBJECT_AVAILABLE;
39665 spin_unlock(&object->lock);
39666@@ -602,7 +602,7 @@ static void fscache_object_available(str
39667 fscache_enqueue_dependents(object);
39668
39669 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
39670- fscache_stat(&fscache_n_object_avail);
39671+ fscache_stat_unchecked(&fscache_n_object_avail);
39672
39673 _leave("");
39674 }
39675@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
39676 enum fscache_checkaux result;
39677
39678 if (!object->cookie->def->check_aux) {
39679- fscache_stat(&fscache_n_checkaux_none);
39680+ fscache_stat_unchecked(&fscache_n_checkaux_none);
39681 return FSCACHE_CHECKAUX_OKAY;
39682 }
39683
39684@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
39685 switch (result) {
39686 /* entry okay as is */
39687 case FSCACHE_CHECKAUX_OKAY:
39688- fscache_stat(&fscache_n_checkaux_okay);
39689+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
39690 break;
39691
39692 /* entry requires update */
39693 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
39694- fscache_stat(&fscache_n_checkaux_update);
39695+ fscache_stat_unchecked(&fscache_n_checkaux_update);
39696 break;
39697
39698 /* entry requires deletion */
39699 case FSCACHE_CHECKAUX_OBSOLETE:
39700- fscache_stat(&fscache_n_checkaux_obsolete);
39701+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
39702 break;
39703
39704 default:
39705diff -urNp linux-3.0.4/fs/fscache/operation.c linux-3.0.4/fs/fscache/operation.c
39706--- linux-3.0.4/fs/fscache/operation.c 2011-07-21 22:17:23.000000000 -0400
39707+++ linux-3.0.4/fs/fscache/operation.c 2011-08-23 21:47:56.000000000 -0400
39708@@ -17,7 +17,7 @@
39709 #include <linux/slab.h>
39710 #include "internal.h"
39711
39712-atomic_t fscache_op_debug_id;
39713+atomic_unchecked_t fscache_op_debug_id;
39714 EXPORT_SYMBOL(fscache_op_debug_id);
39715
39716 /**
39717@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fs
39718 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
39719 ASSERTCMP(atomic_read(&op->usage), >, 0);
39720
39721- fscache_stat(&fscache_n_op_enqueue);
39722+ fscache_stat_unchecked(&fscache_n_op_enqueue);
39723 switch (op->flags & FSCACHE_OP_TYPE) {
39724 case FSCACHE_OP_ASYNC:
39725 _debug("queue async");
39726@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscach
39727 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
39728 if (op->processor)
39729 fscache_enqueue_operation(op);
39730- fscache_stat(&fscache_n_op_run);
39731+ fscache_stat_unchecked(&fscache_n_op_run);
39732 }
39733
39734 /*
39735@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct f
39736 if (object->n_ops > 1) {
39737 atomic_inc(&op->usage);
39738 list_add_tail(&op->pend_link, &object->pending_ops);
39739- fscache_stat(&fscache_n_op_pend);
39740+ fscache_stat_unchecked(&fscache_n_op_pend);
39741 } else if (!list_empty(&object->pending_ops)) {
39742 atomic_inc(&op->usage);
39743 list_add_tail(&op->pend_link, &object->pending_ops);
39744- fscache_stat(&fscache_n_op_pend);
39745+ fscache_stat_unchecked(&fscache_n_op_pend);
39746 fscache_start_operations(object);
39747 } else {
39748 ASSERTCMP(object->n_in_progress, ==, 0);
39749@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct f
39750 object->n_exclusive++; /* reads and writes must wait */
39751 atomic_inc(&op->usage);
39752 list_add_tail(&op->pend_link, &object->pending_ops);
39753- fscache_stat(&fscache_n_op_pend);
39754+ fscache_stat_unchecked(&fscache_n_op_pend);
39755 ret = 0;
39756 } else {
39757 /* not allowed to submit ops in any other state */
39758@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_obj
39759 if (object->n_exclusive > 0) {
39760 atomic_inc(&op->usage);
39761 list_add_tail(&op->pend_link, &object->pending_ops);
39762- fscache_stat(&fscache_n_op_pend);
39763+ fscache_stat_unchecked(&fscache_n_op_pend);
39764 } else if (!list_empty(&object->pending_ops)) {
39765 atomic_inc(&op->usage);
39766 list_add_tail(&op->pend_link, &object->pending_ops);
39767- fscache_stat(&fscache_n_op_pend);
39768+ fscache_stat_unchecked(&fscache_n_op_pend);
39769 fscache_start_operations(object);
39770 } else {
39771 ASSERTCMP(object->n_exclusive, ==, 0);
39772@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_obj
39773 object->n_ops++;
39774 atomic_inc(&op->usage);
39775 list_add_tail(&op->pend_link, &object->pending_ops);
39776- fscache_stat(&fscache_n_op_pend);
39777+ fscache_stat_unchecked(&fscache_n_op_pend);
39778 ret = 0;
39779 } else if (object->state == FSCACHE_OBJECT_DYING ||
39780 object->state == FSCACHE_OBJECT_LC_DYING ||
39781 object->state == FSCACHE_OBJECT_WITHDRAWING) {
39782- fscache_stat(&fscache_n_op_rejected);
39783+ fscache_stat_unchecked(&fscache_n_op_rejected);
39784 ret = -ENOBUFS;
39785 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
39786 fscache_report_unexpected_submission(object, op, ostate);
39787@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_ope
39788
39789 ret = -EBUSY;
39790 if (!list_empty(&op->pend_link)) {
39791- fscache_stat(&fscache_n_op_cancelled);
39792+ fscache_stat_unchecked(&fscache_n_op_cancelled);
39793 list_del_init(&op->pend_link);
39794 object->n_ops--;
39795 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
39796@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscach
39797 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
39798 BUG();
39799
39800- fscache_stat(&fscache_n_op_release);
39801+ fscache_stat_unchecked(&fscache_n_op_release);
39802
39803 if (op->release) {
39804 op->release(op);
39805@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscach
39806 * lock, and defer it otherwise */
39807 if (!spin_trylock(&object->lock)) {
39808 _debug("defer put");
39809- fscache_stat(&fscache_n_op_deferred_release);
39810+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
39811
39812 cache = object->cache;
39813 spin_lock(&cache->op_gc_list_lock);
39814@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_st
39815
39816 _debug("GC DEFERRED REL OBJ%x OP%x",
39817 object->debug_id, op->debug_id);
39818- fscache_stat(&fscache_n_op_gc);
39819+ fscache_stat_unchecked(&fscache_n_op_gc);
39820
39821 ASSERTCMP(atomic_read(&op->usage), ==, 0);
39822
39823diff -urNp linux-3.0.4/fs/fscache/page.c linux-3.0.4/fs/fscache/page.c
39824--- linux-3.0.4/fs/fscache/page.c 2011-07-21 22:17:23.000000000 -0400
39825+++ linux-3.0.4/fs/fscache/page.c 2011-08-23 21:47:56.000000000 -0400
39826@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct
39827 val = radix_tree_lookup(&cookie->stores, page->index);
39828 if (!val) {
39829 rcu_read_unlock();
39830- fscache_stat(&fscache_n_store_vmscan_not_storing);
39831+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
39832 __fscache_uncache_page(cookie, page);
39833 return true;
39834 }
39835@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct
39836 spin_unlock(&cookie->stores_lock);
39837
39838 if (xpage) {
39839- fscache_stat(&fscache_n_store_vmscan_cancelled);
39840- fscache_stat(&fscache_n_store_radix_deletes);
39841+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
39842+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
39843 ASSERTCMP(xpage, ==, page);
39844 } else {
39845- fscache_stat(&fscache_n_store_vmscan_gone);
39846+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
39847 }
39848
39849 wake_up_bit(&cookie->flags, 0);
39850@@ -107,7 +107,7 @@ page_busy:
39851 /* we might want to wait here, but that could deadlock the allocator as
39852 * the work threads writing to the cache may all end up sleeping
39853 * on memory allocation */
39854- fscache_stat(&fscache_n_store_vmscan_busy);
39855+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
39856 return false;
39857 }
39858 EXPORT_SYMBOL(__fscache_maybe_release_page);
39859@@ -131,7 +131,7 @@ static void fscache_end_page_write(struc
39860 FSCACHE_COOKIE_STORING_TAG);
39861 if (!radix_tree_tag_get(&cookie->stores, page->index,
39862 FSCACHE_COOKIE_PENDING_TAG)) {
39863- fscache_stat(&fscache_n_store_radix_deletes);
39864+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
39865 xpage = radix_tree_delete(&cookie->stores, page->index);
39866 }
39867 spin_unlock(&cookie->stores_lock);
39868@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(stru
39869
39870 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
39871
39872- fscache_stat(&fscache_n_attr_changed_calls);
39873+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
39874
39875 if (fscache_object_is_active(object)) {
39876 fscache_stat(&fscache_n_cop_attr_changed);
39877@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscach
39878
39879 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
39880
39881- fscache_stat(&fscache_n_attr_changed);
39882+ fscache_stat_unchecked(&fscache_n_attr_changed);
39883
39884 op = kzalloc(sizeof(*op), GFP_KERNEL);
39885 if (!op) {
39886- fscache_stat(&fscache_n_attr_changed_nomem);
39887+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
39888 _leave(" = -ENOMEM");
39889 return -ENOMEM;
39890 }
39891@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscach
39892 if (fscache_submit_exclusive_op(object, op) < 0)
39893 goto nobufs;
39894 spin_unlock(&cookie->lock);
39895- fscache_stat(&fscache_n_attr_changed_ok);
39896+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
39897 fscache_put_operation(op);
39898 _leave(" = 0");
39899 return 0;
39900@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscach
39901 nobufs:
39902 spin_unlock(&cookie->lock);
39903 kfree(op);
39904- fscache_stat(&fscache_n_attr_changed_nobufs);
39905+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
39906 _leave(" = %d", -ENOBUFS);
39907 return -ENOBUFS;
39908 }
39909@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache
39910 /* allocate a retrieval operation and attempt to submit it */
39911 op = kzalloc(sizeof(*op), GFP_NOIO);
39912 if (!op) {
39913- fscache_stat(&fscache_n_retrievals_nomem);
39914+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39915 return NULL;
39916 }
39917
39918@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_loo
39919 return 0;
39920 }
39921
39922- fscache_stat(&fscache_n_retrievals_wait);
39923+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
39924
39925 jif = jiffies;
39926 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
39927 fscache_wait_bit_interruptible,
39928 TASK_INTERRUPTIBLE) != 0) {
39929- fscache_stat(&fscache_n_retrievals_intr);
39930+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
39931 _leave(" = -ERESTARTSYS");
39932 return -ERESTARTSYS;
39933 }
39934@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_loo
39935 */
39936 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
39937 struct fscache_retrieval *op,
39938- atomic_t *stat_op_waits,
39939- atomic_t *stat_object_dead)
39940+ atomic_unchecked_t *stat_op_waits,
39941+ atomic_unchecked_t *stat_object_dead)
39942 {
39943 int ret;
39944
39945@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_ac
39946 goto check_if_dead;
39947
39948 _debug(">>> WT");
39949- fscache_stat(stat_op_waits);
39950+ fscache_stat_unchecked(stat_op_waits);
39951 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
39952 fscache_wait_bit_interruptible,
39953 TASK_INTERRUPTIBLE) < 0) {
39954@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_ac
39955
39956 check_if_dead:
39957 if (unlikely(fscache_object_is_dead(object))) {
39958- fscache_stat(stat_object_dead);
39959+ fscache_stat_unchecked(stat_object_dead);
39960 return -ENOBUFS;
39961 }
39962 return 0;
39963@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct
39964
39965 _enter("%p,%p,,,", cookie, page);
39966
39967- fscache_stat(&fscache_n_retrievals);
39968+ fscache_stat_unchecked(&fscache_n_retrievals);
39969
39970 if (hlist_empty(&cookie->backing_objects))
39971 goto nobufs;
39972@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct
39973 goto nobufs_unlock;
39974 spin_unlock(&cookie->lock);
39975
39976- fscache_stat(&fscache_n_retrieval_ops);
39977+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
39978
39979 /* pin the netfs read context in case we need to do the actual netfs
39980 * read because we've encountered a cache read failure */
39981@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct
39982
39983 error:
39984 if (ret == -ENOMEM)
39985- fscache_stat(&fscache_n_retrievals_nomem);
39986+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39987 else if (ret == -ERESTARTSYS)
39988- fscache_stat(&fscache_n_retrievals_intr);
39989+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
39990 else if (ret == -ENODATA)
39991- fscache_stat(&fscache_n_retrievals_nodata);
39992+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
39993 else if (ret < 0)
39994- fscache_stat(&fscache_n_retrievals_nobufs);
39995+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
39996 else
39997- fscache_stat(&fscache_n_retrievals_ok);
39998+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
39999
40000 fscache_put_retrieval(op);
40001 _leave(" = %d", ret);
40002@@ -429,7 +429,7 @@ nobufs_unlock:
40003 spin_unlock(&cookie->lock);
40004 kfree(op);
40005 nobufs:
40006- fscache_stat(&fscache_n_retrievals_nobufs);
40007+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
40008 _leave(" = -ENOBUFS");
40009 return -ENOBUFS;
40010 }
40011@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct
40012
40013 _enter("%p,,%d,,,", cookie, *nr_pages);
40014
40015- fscache_stat(&fscache_n_retrievals);
40016+ fscache_stat_unchecked(&fscache_n_retrievals);
40017
40018 if (hlist_empty(&cookie->backing_objects))
40019 goto nobufs;
40020@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct
40021 goto nobufs_unlock;
40022 spin_unlock(&cookie->lock);
40023
40024- fscache_stat(&fscache_n_retrieval_ops);
40025+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
40026
40027 /* pin the netfs read context in case we need to do the actual netfs
40028 * read because we've encountered a cache read failure */
40029@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct
40030
40031 error:
40032 if (ret == -ENOMEM)
40033- fscache_stat(&fscache_n_retrievals_nomem);
40034+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
40035 else if (ret == -ERESTARTSYS)
40036- fscache_stat(&fscache_n_retrievals_intr);
40037+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
40038 else if (ret == -ENODATA)
40039- fscache_stat(&fscache_n_retrievals_nodata);
40040+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
40041 else if (ret < 0)
40042- fscache_stat(&fscache_n_retrievals_nobufs);
40043+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
40044 else
40045- fscache_stat(&fscache_n_retrievals_ok);
40046+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
40047
40048 fscache_put_retrieval(op);
40049 _leave(" = %d", ret);
40050@@ -545,7 +545,7 @@ nobufs_unlock:
40051 spin_unlock(&cookie->lock);
40052 kfree(op);
40053 nobufs:
40054- fscache_stat(&fscache_n_retrievals_nobufs);
40055+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
40056 _leave(" = -ENOBUFS");
40057 return -ENOBUFS;
40058 }
40059@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_
40060
40061 _enter("%p,%p,,,", cookie, page);
40062
40063- fscache_stat(&fscache_n_allocs);
40064+ fscache_stat_unchecked(&fscache_n_allocs);
40065
40066 if (hlist_empty(&cookie->backing_objects))
40067 goto nobufs;
40068@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_
40069 goto nobufs_unlock;
40070 spin_unlock(&cookie->lock);
40071
40072- fscache_stat(&fscache_n_alloc_ops);
40073+ fscache_stat_unchecked(&fscache_n_alloc_ops);
40074
40075 ret = fscache_wait_for_retrieval_activation(
40076 object, op,
40077@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_
40078
40079 error:
40080 if (ret == -ERESTARTSYS)
40081- fscache_stat(&fscache_n_allocs_intr);
40082+ fscache_stat_unchecked(&fscache_n_allocs_intr);
40083 else if (ret < 0)
40084- fscache_stat(&fscache_n_allocs_nobufs);
40085+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
40086 else
40087- fscache_stat(&fscache_n_allocs_ok);
40088+ fscache_stat_unchecked(&fscache_n_allocs_ok);
40089
40090 fscache_put_retrieval(op);
40091 _leave(" = %d", ret);
40092@@ -625,7 +625,7 @@ nobufs_unlock:
40093 spin_unlock(&cookie->lock);
40094 kfree(op);
40095 nobufs:
40096- fscache_stat(&fscache_n_allocs_nobufs);
40097+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
40098 _leave(" = -ENOBUFS");
40099 return -ENOBUFS;
40100 }
40101@@ -666,7 +666,7 @@ static void fscache_write_op(struct fsca
40102
40103 spin_lock(&cookie->stores_lock);
40104
40105- fscache_stat(&fscache_n_store_calls);
40106+ fscache_stat_unchecked(&fscache_n_store_calls);
40107
40108 /* find a page to store */
40109 page = NULL;
40110@@ -677,7 +677,7 @@ static void fscache_write_op(struct fsca
40111 page = results[0];
40112 _debug("gang %d [%lx]", n, page->index);
40113 if (page->index > op->store_limit) {
40114- fscache_stat(&fscache_n_store_pages_over_limit);
40115+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
40116 goto superseded;
40117 }
40118
40119@@ -689,7 +689,7 @@ static void fscache_write_op(struct fsca
40120 spin_unlock(&cookie->stores_lock);
40121 spin_unlock(&object->lock);
40122
40123- fscache_stat(&fscache_n_store_pages);
40124+ fscache_stat_unchecked(&fscache_n_store_pages);
40125 fscache_stat(&fscache_n_cop_write_page);
40126 ret = object->cache->ops->write_page(op, page);
40127 fscache_stat_d(&fscache_n_cop_write_page);
40128@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_
40129 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
40130 ASSERT(PageFsCache(page));
40131
40132- fscache_stat(&fscache_n_stores);
40133+ fscache_stat_unchecked(&fscache_n_stores);
40134
40135 op = kzalloc(sizeof(*op), GFP_NOIO);
40136 if (!op)
40137@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_
40138 spin_unlock(&cookie->stores_lock);
40139 spin_unlock(&object->lock);
40140
40141- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
40142+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
40143 op->store_limit = object->store_limit;
40144
40145 if (fscache_submit_op(object, &op->op) < 0)
40146@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_
40147
40148 spin_unlock(&cookie->lock);
40149 radix_tree_preload_end();
40150- fscache_stat(&fscache_n_store_ops);
40151- fscache_stat(&fscache_n_stores_ok);
40152+ fscache_stat_unchecked(&fscache_n_store_ops);
40153+ fscache_stat_unchecked(&fscache_n_stores_ok);
40154
40155 /* the work queue now carries its own ref on the object */
40156 fscache_put_operation(&op->op);
40157@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_
40158 return 0;
40159
40160 already_queued:
40161- fscache_stat(&fscache_n_stores_again);
40162+ fscache_stat_unchecked(&fscache_n_stores_again);
40163 already_pending:
40164 spin_unlock(&cookie->stores_lock);
40165 spin_unlock(&object->lock);
40166 spin_unlock(&cookie->lock);
40167 radix_tree_preload_end();
40168 kfree(op);
40169- fscache_stat(&fscache_n_stores_ok);
40170+ fscache_stat_unchecked(&fscache_n_stores_ok);
40171 _leave(" = 0");
40172 return 0;
40173
40174@@ -851,14 +851,14 @@ nobufs:
40175 spin_unlock(&cookie->lock);
40176 radix_tree_preload_end();
40177 kfree(op);
40178- fscache_stat(&fscache_n_stores_nobufs);
40179+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
40180 _leave(" = -ENOBUFS");
40181 return -ENOBUFS;
40182
40183 nomem_free:
40184 kfree(op);
40185 nomem:
40186- fscache_stat(&fscache_n_stores_oom);
40187+ fscache_stat_unchecked(&fscache_n_stores_oom);
40188 _leave(" = -ENOMEM");
40189 return -ENOMEM;
40190 }
40191@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscac
40192 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
40193 ASSERTCMP(page, !=, NULL);
40194
40195- fscache_stat(&fscache_n_uncaches);
40196+ fscache_stat_unchecked(&fscache_n_uncaches);
40197
40198 /* cache withdrawal may beat us to it */
40199 if (!PageFsCache(page))
40200@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fs
40201 unsigned long loop;
40202
40203 #ifdef CONFIG_FSCACHE_STATS
40204- atomic_add(pagevec->nr, &fscache_n_marks);
40205+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
40206 #endif
40207
40208 for (loop = 0; loop < pagevec->nr; loop++) {
40209diff -urNp linux-3.0.4/fs/fscache/stats.c linux-3.0.4/fs/fscache/stats.c
40210--- linux-3.0.4/fs/fscache/stats.c 2011-07-21 22:17:23.000000000 -0400
40211+++ linux-3.0.4/fs/fscache/stats.c 2011-08-23 21:47:56.000000000 -0400
40212@@ -18,95 +18,95 @@
40213 /*
40214 * operation counters
40215 */
40216-atomic_t fscache_n_op_pend;
40217-atomic_t fscache_n_op_run;
40218-atomic_t fscache_n_op_enqueue;
40219-atomic_t fscache_n_op_requeue;
40220-atomic_t fscache_n_op_deferred_release;
40221-atomic_t fscache_n_op_release;
40222-atomic_t fscache_n_op_gc;
40223-atomic_t fscache_n_op_cancelled;
40224-atomic_t fscache_n_op_rejected;
40225-
40226-atomic_t fscache_n_attr_changed;
40227-atomic_t fscache_n_attr_changed_ok;
40228-atomic_t fscache_n_attr_changed_nobufs;
40229-atomic_t fscache_n_attr_changed_nomem;
40230-atomic_t fscache_n_attr_changed_calls;
40231-
40232-atomic_t fscache_n_allocs;
40233-atomic_t fscache_n_allocs_ok;
40234-atomic_t fscache_n_allocs_wait;
40235-atomic_t fscache_n_allocs_nobufs;
40236-atomic_t fscache_n_allocs_intr;
40237-atomic_t fscache_n_allocs_object_dead;
40238-atomic_t fscache_n_alloc_ops;
40239-atomic_t fscache_n_alloc_op_waits;
40240-
40241-atomic_t fscache_n_retrievals;
40242-atomic_t fscache_n_retrievals_ok;
40243-atomic_t fscache_n_retrievals_wait;
40244-atomic_t fscache_n_retrievals_nodata;
40245-atomic_t fscache_n_retrievals_nobufs;
40246-atomic_t fscache_n_retrievals_intr;
40247-atomic_t fscache_n_retrievals_nomem;
40248-atomic_t fscache_n_retrievals_object_dead;
40249-atomic_t fscache_n_retrieval_ops;
40250-atomic_t fscache_n_retrieval_op_waits;
40251-
40252-atomic_t fscache_n_stores;
40253-atomic_t fscache_n_stores_ok;
40254-atomic_t fscache_n_stores_again;
40255-atomic_t fscache_n_stores_nobufs;
40256-atomic_t fscache_n_stores_oom;
40257-atomic_t fscache_n_store_ops;
40258-atomic_t fscache_n_store_calls;
40259-atomic_t fscache_n_store_pages;
40260-atomic_t fscache_n_store_radix_deletes;
40261-atomic_t fscache_n_store_pages_over_limit;
40262-
40263-atomic_t fscache_n_store_vmscan_not_storing;
40264-atomic_t fscache_n_store_vmscan_gone;
40265-atomic_t fscache_n_store_vmscan_busy;
40266-atomic_t fscache_n_store_vmscan_cancelled;
40267-
40268-atomic_t fscache_n_marks;
40269-atomic_t fscache_n_uncaches;
40270-
40271-atomic_t fscache_n_acquires;
40272-atomic_t fscache_n_acquires_null;
40273-atomic_t fscache_n_acquires_no_cache;
40274-atomic_t fscache_n_acquires_ok;
40275-atomic_t fscache_n_acquires_nobufs;
40276-atomic_t fscache_n_acquires_oom;
40277-
40278-atomic_t fscache_n_updates;
40279-atomic_t fscache_n_updates_null;
40280-atomic_t fscache_n_updates_run;
40281-
40282-atomic_t fscache_n_relinquishes;
40283-atomic_t fscache_n_relinquishes_null;
40284-atomic_t fscache_n_relinquishes_waitcrt;
40285-atomic_t fscache_n_relinquishes_retire;
40286-
40287-atomic_t fscache_n_cookie_index;
40288-atomic_t fscache_n_cookie_data;
40289-atomic_t fscache_n_cookie_special;
40290-
40291-atomic_t fscache_n_object_alloc;
40292-atomic_t fscache_n_object_no_alloc;
40293-atomic_t fscache_n_object_lookups;
40294-atomic_t fscache_n_object_lookups_negative;
40295-atomic_t fscache_n_object_lookups_positive;
40296-atomic_t fscache_n_object_lookups_timed_out;
40297-atomic_t fscache_n_object_created;
40298-atomic_t fscache_n_object_avail;
40299-atomic_t fscache_n_object_dead;
40300-
40301-atomic_t fscache_n_checkaux_none;
40302-atomic_t fscache_n_checkaux_okay;
40303-atomic_t fscache_n_checkaux_update;
40304-atomic_t fscache_n_checkaux_obsolete;
40305+atomic_unchecked_t fscache_n_op_pend;
40306+atomic_unchecked_t fscache_n_op_run;
40307+atomic_unchecked_t fscache_n_op_enqueue;
40308+atomic_unchecked_t fscache_n_op_requeue;
40309+atomic_unchecked_t fscache_n_op_deferred_release;
40310+atomic_unchecked_t fscache_n_op_release;
40311+atomic_unchecked_t fscache_n_op_gc;
40312+atomic_unchecked_t fscache_n_op_cancelled;
40313+atomic_unchecked_t fscache_n_op_rejected;
40314+
40315+atomic_unchecked_t fscache_n_attr_changed;
40316+atomic_unchecked_t fscache_n_attr_changed_ok;
40317+atomic_unchecked_t fscache_n_attr_changed_nobufs;
40318+atomic_unchecked_t fscache_n_attr_changed_nomem;
40319+atomic_unchecked_t fscache_n_attr_changed_calls;
40320+
40321+atomic_unchecked_t fscache_n_allocs;
40322+atomic_unchecked_t fscache_n_allocs_ok;
40323+atomic_unchecked_t fscache_n_allocs_wait;
40324+atomic_unchecked_t fscache_n_allocs_nobufs;
40325+atomic_unchecked_t fscache_n_allocs_intr;
40326+atomic_unchecked_t fscache_n_allocs_object_dead;
40327+atomic_unchecked_t fscache_n_alloc_ops;
40328+atomic_unchecked_t fscache_n_alloc_op_waits;
40329+
40330+atomic_unchecked_t fscache_n_retrievals;
40331+atomic_unchecked_t fscache_n_retrievals_ok;
40332+atomic_unchecked_t fscache_n_retrievals_wait;
40333+atomic_unchecked_t fscache_n_retrievals_nodata;
40334+atomic_unchecked_t fscache_n_retrievals_nobufs;
40335+atomic_unchecked_t fscache_n_retrievals_intr;
40336+atomic_unchecked_t fscache_n_retrievals_nomem;
40337+atomic_unchecked_t fscache_n_retrievals_object_dead;
40338+atomic_unchecked_t fscache_n_retrieval_ops;
40339+atomic_unchecked_t fscache_n_retrieval_op_waits;
40340+
40341+atomic_unchecked_t fscache_n_stores;
40342+atomic_unchecked_t fscache_n_stores_ok;
40343+atomic_unchecked_t fscache_n_stores_again;
40344+atomic_unchecked_t fscache_n_stores_nobufs;
40345+atomic_unchecked_t fscache_n_stores_oom;
40346+atomic_unchecked_t fscache_n_store_ops;
40347+atomic_unchecked_t fscache_n_store_calls;
40348+atomic_unchecked_t fscache_n_store_pages;
40349+atomic_unchecked_t fscache_n_store_radix_deletes;
40350+atomic_unchecked_t fscache_n_store_pages_over_limit;
40351+
40352+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
40353+atomic_unchecked_t fscache_n_store_vmscan_gone;
40354+atomic_unchecked_t fscache_n_store_vmscan_busy;
40355+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
40356+
40357+atomic_unchecked_t fscache_n_marks;
40358+atomic_unchecked_t fscache_n_uncaches;
40359+
40360+atomic_unchecked_t fscache_n_acquires;
40361+atomic_unchecked_t fscache_n_acquires_null;
40362+atomic_unchecked_t fscache_n_acquires_no_cache;
40363+atomic_unchecked_t fscache_n_acquires_ok;
40364+atomic_unchecked_t fscache_n_acquires_nobufs;
40365+atomic_unchecked_t fscache_n_acquires_oom;
40366+
40367+atomic_unchecked_t fscache_n_updates;
40368+atomic_unchecked_t fscache_n_updates_null;
40369+atomic_unchecked_t fscache_n_updates_run;
40370+
40371+atomic_unchecked_t fscache_n_relinquishes;
40372+atomic_unchecked_t fscache_n_relinquishes_null;
40373+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
40374+atomic_unchecked_t fscache_n_relinquishes_retire;
40375+
40376+atomic_unchecked_t fscache_n_cookie_index;
40377+atomic_unchecked_t fscache_n_cookie_data;
40378+atomic_unchecked_t fscache_n_cookie_special;
40379+
40380+atomic_unchecked_t fscache_n_object_alloc;
40381+atomic_unchecked_t fscache_n_object_no_alloc;
40382+atomic_unchecked_t fscache_n_object_lookups;
40383+atomic_unchecked_t fscache_n_object_lookups_negative;
40384+atomic_unchecked_t fscache_n_object_lookups_positive;
40385+atomic_unchecked_t fscache_n_object_lookups_timed_out;
40386+atomic_unchecked_t fscache_n_object_created;
40387+atomic_unchecked_t fscache_n_object_avail;
40388+atomic_unchecked_t fscache_n_object_dead;
40389+
40390+atomic_unchecked_t fscache_n_checkaux_none;
40391+atomic_unchecked_t fscache_n_checkaux_okay;
40392+atomic_unchecked_t fscache_n_checkaux_update;
40393+atomic_unchecked_t fscache_n_checkaux_obsolete;
40394
40395 atomic_t fscache_n_cop_alloc_object;
40396 atomic_t fscache_n_cop_lookup_object;
40397@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
40398 seq_puts(m, "FS-Cache statistics\n");
40399
40400 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
40401- atomic_read(&fscache_n_cookie_index),
40402- atomic_read(&fscache_n_cookie_data),
40403- atomic_read(&fscache_n_cookie_special));
40404+ atomic_read_unchecked(&fscache_n_cookie_index),
40405+ atomic_read_unchecked(&fscache_n_cookie_data),
40406+ atomic_read_unchecked(&fscache_n_cookie_special));
40407
40408 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
40409- atomic_read(&fscache_n_object_alloc),
40410- atomic_read(&fscache_n_object_no_alloc),
40411- atomic_read(&fscache_n_object_avail),
40412- atomic_read(&fscache_n_object_dead));
40413+ atomic_read_unchecked(&fscache_n_object_alloc),
40414+ atomic_read_unchecked(&fscache_n_object_no_alloc),
40415+ atomic_read_unchecked(&fscache_n_object_avail),
40416+ atomic_read_unchecked(&fscache_n_object_dead));
40417 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
40418- atomic_read(&fscache_n_checkaux_none),
40419- atomic_read(&fscache_n_checkaux_okay),
40420- atomic_read(&fscache_n_checkaux_update),
40421- atomic_read(&fscache_n_checkaux_obsolete));
40422+ atomic_read_unchecked(&fscache_n_checkaux_none),
40423+ atomic_read_unchecked(&fscache_n_checkaux_okay),
40424+ atomic_read_unchecked(&fscache_n_checkaux_update),
40425+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
40426
40427 seq_printf(m, "Pages : mrk=%u unc=%u\n",
40428- atomic_read(&fscache_n_marks),
40429- atomic_read(&fscache_n_uncaches));
40430+ atomic_read_unchecked(&fscache_n_marks),
40431+ atomic_read_unchecked(&fscache_n_uncaches));
40432
40433 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
40434 " oom=%u\n",
40435- atomic_read(&fscache_n_acquires),
40436- atomic_read(&fscache_n_acquires_null),
40437- atomic_read(&fscache_n_acquires_no_cache),
40438- atomic_read(&fscache_n_acquires_ok),
40439- atomic_read(&fscache_n_acquires_nobufs),
40440- atomic_read(&fscache_n_acquires_oom));
40441+ atomic_read_unchecked(&fscache_n_acquires),
40442+ atomic_read_unchecked(&fscache_n_acquires_null),
40443+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
40444+ atomic_read_unchecked(&fscache_n_acquires_ok),
40445+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
40446+ atomic_read_unchecked(&fscache_n_acquires_oom));
40447
40448 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
40449- atomic_read(&fscache_n_object_lookups),
40450- atomic_read(&fscache_n_object_lookups_negative),
40451- atomic_read(&fscache_n_object_lookups_positive),
40452- atomic_read(&fscache_n_object_created),
40453- atomic_read(&fscache_n_object_lookups_timed_out));
40454+ atomic_read_unchecked(&fscache_n_object_lookups),
40455+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
40456+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
40457+ atomic_read_unchecked(&fscache_n_object_created),
40458+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
40459
40460 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
40461- atomic_read(&fscache_n_updates),
40462- atomic_read(&fscache_n_updates_null),
40463- atomic_read(&fscache_n_updates_run));
40464+ atomic_read_unchecked(&fscache_n_updates),
40465+ atomic_read_unchecked(&fscache_n_updates_null),
40466+ atomic_read_unchecked(&fscache_n_updates_run));
40467
40468 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
40469- atomic_read(&fscache_n_relinquishes),
40470- atomic_read(&fscache_n_relinquishes_null),
40471- atomic_read(&fscache_n_relinquishes_waitcrt),
40472- atomic_read(&fscache_n_relinquishes_retire));
40473+ atomic_read_unchecked(&fscache_n_relinquishes),
40474+ atomic_read_unchecked(&fscache_n_relinquishes_null),
40475+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
40476+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
40477
40478 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
40479- atomic_read(&fscache_n_attr_changed),
40480- atomic_read(&fscache_n_attr_changed_ok),
40481- atomic_read(&fscache_n_attr_changed_nobufs),
40482- atomic_read(&fscache_n_attr_changed_nomem),
40483- atomic_read(&fscache_n_attr_changed_calls));
40484+ atomic_read_unchecked(&fscache_n_attr_changed),
40485+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
40486+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
40487+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
40488+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
40489
40490 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
40491- atomic_read(&fscache_n_allocs),
40492- atomic_read(&fscache_n_allocs_ok),
40493- atomic_read(&fscache_n_allocs_wait),
40494- atomic_read(&fscache_n_allocs_nobufs),
40495- atomic_read(&fscache_n_allocs_intr));
40496+ atomic_read_unchecked(&fscache_n_allocs),
40497+ atomic_read_unchecked(&fscache_n_allocs_ok),
40498+ atomic_read_unchecked(&fscache_n_allocs_wait),
40499+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
40500+ atomic_read_unchecked(&fscache_n_allocs_intr));
40501 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
40502- atomic_read(&fscache_n_alloc_ops),
40503- atomic_read(&fscache_n_alloc_op_waits),
40504- atomic_read(&fscache_n_allocs_object_dead));
40505+ atomic_read_unchecked(&fscache_n_alloc_ops),
40506+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
40507+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
40508
40509 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
40510 " int=%u oom=%u\n",
40511- atomic_read(&fscache_n_retrievals),
40512- atomic_read(&fscache_n_retrievals_ok),
40513- atomic_read(&fscache_n_retrievals_wait),
40514- atomic_read(&fscache_n_retrievals_nodata),
40515- atomic_read(&fscache_n_retrievals_nobufs),
40516- atomic_read(&fscache_n_retrievals_intr),
40517- atomic_read(&fscache_n_retrievals_nomem));
40518+ atomic_read_unchecked(&fscache_n_retrievals),
40519+ atomic_read_unchecked(&fscache_n_retrievals_ok),
40520+ atomic_read_unchecked(&fscache_n_retrievals_wait),
40521+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
40522+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
40523+ atomic_read_unchecked(&fscache_n_retrievals_intr),
40524+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
40525 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
40526- atomic_read(&fscache_n_retrieval_ops),
40527- atomic_read(&fscache_n_retrieval_op_waits),
40528- atomic_read(&fscache_n_retrievals_object_dead));
40529+ atomic_read_unchecked(&fscache_n_retrieval_ops),
40530+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
40531+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
40532
40533 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
40534- atomic_read(&fscache_n_stores),
40535- atomic_read(&fscache_n_stores_ok),
40536- atomic_read(&fscache_n_stores_again),
40537- atomic_read(&fscache_n_stores_nobufs),
40538- atomic_read(&fscache_n_stores_oom));
40539+ atomic_read_unchecked(&fscache_n_stores),
40540+ atomic_read_unchecked(&fscache_n_stores_ok),
40541+ atomic_read_unchecked(&fscache_n_stores_again),
40542+ atomic_read_unchecked(&fscache_n_stores_nobufs),
40543+ atomic_read_unchecked(&fscache_n_stores_oom));
40544 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
40545- atomic_read(&fscache_n_store_ops),
40546- atomic_read(&fscache_n_store_calls),
40547- atomic_read(&fscache_n_store_pages),
40548- atomic_read(&fscache_n_store_radix_deletes),
40549- atomic_read(&fscache_n_store_pages_over_limit));
40550+ atomic_read_unchecked(&fscache_n_store_ops),
40551+ atomic_read_unchecked(&fscache_n_store_calls),
40552+ atomic_read_unchecked(&fscache_n_store_pages),
40553+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
40554+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
40555
40556 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
40557- atomic_read(&fscache_n_store_vmscan_not_storing),
40558- atomic_read(&fscache_n_store_vmscan_gone),
40559- atomic_read(&fscache_n_store_vmscan_busy),
40560- atomic_read(&fscache_n_store_vmscan_cancelled));
40561+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
40562+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
40563+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
40564+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
40565
40566 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
40567- atomic_read(&fscache_n_op_pend),
40568- atomic_read(&fscache_n_op_run),
40569- atomic_read(&fscache_n_op_enqueue),
40570- atomic_read(&fscache_n_op_cancelled),
40571- atomic_read(&fscache_n_op_rejected));
40572+ atomic_read_unchecked(&fscache_n_op_pend),
40573+ atomic_read_unchecked(&fscache_n_op_run),
40574+ atomic_read_unchecked(&fscache_n_op_enqueue),
40575+ atomic_read_unchecked(&fscache_n_op_cancelled),
40576+ atomic_read_unchecked(&fscache_n_op_rejected));
40577 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
40578- atomic_read(&fscache_n_op_deferred_release),
40579- atomic_read(&fscache_n_op_release),
40580- atomic_read(&fscache_n_op_gc));
40581+ atomic_read_unchecked(&fscache_n_op_deferred_release),
40582+ atomic_read_unchecked(&fscache_n_op_release),
40583+ atomic_read_unchecked(&fscache_n_op_gc));
40584
40585 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
40586 atomic_read(&fscache_n_cop_alloc_object),
40587diff -urNp linux-3.0.4/fs/fs_struct.c linux-3.0.4/fs/fs_struct.c
40588--- linux-3.0.4/fs/fs_struct.c 2011-07-21 22:17:23.000000000 -0400
40589+++ linux-3.0.4/fs/fs_struct.c 2011-08-23 21:48:14.000000000 -0400
40590@@ -4,6 +4,7 @@
40591 #include <linux/path.h>
40592 #include <linux/slab.h>
40593 #include <linux/fs_struct.h>
40594+#include <linux/grsecurity.h>
40595 #include "internal.h"
40596
40597 static inline void path_get_longterm(struct path *path)
40598@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, s
40599 old_root = fs->root;
40600 fs->root = *path;
40601 path_get_longterm(path);
40602+ gr_set_chroot_entries(current, path);
40603 write_seqcount_end(&fs->seq);
40604 spin_unlock(&fs->lock);
40605 if (old_root.dentry)
40606@@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_roo
40607 && fs->root.mnt == old_root->mnt) {
40608 path_get_longterm(new_root);
40609 fs->root = *new_root;
40610+ gr_set_chroot_entries(p, new_root);
40611 count++;
40612 }
40613 if (fs->pwd.dentry == old_root->dentry
40614@@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
40615 spin_lock(&fs->lock);
40616 write_seqcount_begin(&fs->seq);
40617 tsk->fs = NULL;
40618- kill = !--fs->users;
40619+ gr_clear_chroot_entries(tsk);
40620+ kill = !atomic_dec_return(&fs->users);
40621 write_seqcount_end(&fs->seq);
40622 spin_unlock(&fs->lock);
40623 task_unlock(tsk);
40624@@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct
40625 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
40626 /* We don't need to lock fs - think why ;-) */
40627 if (fs) {
40628- fs->users = 1;
40629+ atomic_set(&fs->users, 1);
40630 fs->in_exec = 0;
40631 spin_lock_init(&fs->lock);
40632 seqcount_init(&fs->seq);
40633@@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct
40634 spin_lock(&old->lock);
40635 fs->root = old->root;
40636 path_get_longterm(&fs->root);
40637+ /* instead of calling gr_set_chroot_entries here,
40638+ we call it from every caller of this function
40639+ */
40640 fs->pwd = old->pwd;
40641 path_get_longterm(&fs->pwd);
40642 spin_unlock(&old->lock);
40643@@ -150,8 +157,9 @@ int unshare_fs_struct(void)
40644
40645 task_lock(current);
40646 spin_lock(&fs->lock);
40647- kill = !--fs->users;
40648+ kill = !atomic_dec_return(&fs->users);
40649 current->fs = new_fs;
40650+ gr_set_chroot_entries(current, &new_fs->root);
40651 spin_unlock(&fs->lock);
40652 task_unlock(current);
40653
40654@@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
40655
40656 /* to be mentioned only in INIT_TASK */
40657 struct fs_struct init_fs = {
40658- .users = 1,
40659+ .users = ATOMIC_INIT(1),
40660 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
40661 .seq = SEQCNT_ZERO,
40662 .umask = 0022,
40663@@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
40664 task_lock(current);
40665
40666 spin_lock(&init_fs.lock);
40667- init_fs.users++;
40668+ atomic_inc(&init_fs.users);
40669 spin_unlock(&init_fs.lock);
40670
40671 spin_lock(&fs->lock);
40672 current->fs = &init_fs;
40673- kill = !--fs->users;
40674+ gr_set_chroot_entries(current, &current->fs->root);
40675+ kill = !atomic_dec_return(&fs->users);
40676 spin_unlock(&fs->lock);
40677
40678 task_unlock(current);
40679diff -urNp linux-3.0.4/fs/fuse/cuse.c linux-3.0.4/fs/fuse/cuse.c
40680--- linux-3.0.4/fs/fuse/cuse.c 2011-07-21 22:17:23.000000000 -0400
40681+++ linux-3.0.4/fs/fuse/cuse.c 2011-08-23 21:47:56.000000000 -0400
40682@@ -586,10 +586,12 @@ static int __init cuse_init(void)
40683 INIT_LIST_HEAD(&cuse_conntbl[i]);
40684
40685 /* inherit and extend fuse_dev_operations */
40686- cuse_channel_fops = fuse_dev_operations;
40687- cuse_channel_fops.owner = THIS_MODULE;
40688- cuse_channel_fops.open = cuse_channel_open;
40689- cuse_channel_fops.release = cuse_channel_release;
40690+ pax_open_kernel();
40691+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
40692+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
40693+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
40694+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
40695+ pax_close_kernel();
40696
40697 cuse_class = class_create(THIS_MODULE, "cuse");
40698 if (IS_ERR(cuse_class))
40699diff -urNp linux-3.0.4/fs/fuse/dev.c linux-3.0.4/fs/fuse/dev.c
40700--- linux-3.0.4/fs/fuse/dev.c 2011-08-29 23:26:14.000000000 -0400
40701+++ linux-3.0.4/fs/fuse/dev.c 2011-08-29 23:26:27.000000000 -0400
40702@@ -1238,7 +1238,7 @@ static ssize_t fuse_dev_splice_read(stru
40703 ret = 0;
40704 pipe_lock(pipe);
40705
40706- if (!pipe->readers) {
40707+ if (!atomic_read(&pipe->readers)) {
40708 send_sig(SIGPIPE, current, 0);
40709 if (!ret)
40710 ret = -EPIPE;
40711diff -urNp linux-3.0.4/fs/fuse/dir.c linux-3.0.4/fs/fuse/dir.c
40712--- linux-3.0.4/fs/fuse/dir.c 2011-07-21 22:17:23.000000000 -0400
40713+++ linux-3.0.4/fs/fuse/dir.c 2011-08-23 21:47:56.000000000 -0400
40714@@ -1148,7 +1148,7 @@ static char *read_link(struct dentry *de
40715 return link;
40716 }
40717
40718-static void free_link(char *link)
40719+static void free_link(const char *link)
40720 {
40721 if (!IS_ERR(link))
40722 free_page((unsigned long) link);
40723diff -urNp linux-3.0.4/fs/gfs2/inode.c linux-3.0.4/fs/gfs2/inode.c
40724--- linux-3.0.4/fs/gfs2/inode.c 2011-07-21 22:17:23.000000000 -0400
40725+++ linux-3.0.4/fs/gfs2/inode.c 2011-08-23 21:47:56.000000000 -0400
40726@@ -1525,7 +1525,7 @@ out:
40727
40728 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
40729 {
40730- char *s = nd_get_link(nd);
40731+ const char *s = nd_get_link(nd);
40732 if (!IS_ERR(s))
40733 kfree(s);
40734 }
40735diff -urNp linux-3.0.4/fs/hfsplus/catalog.c linux-3.0.4/fs/hfsplus/catalog.c
40736--- linux-3.0.4/fs/hfsplus/catalog.c 2011-07-21 22:17:23.000000000 -0400
40737+++ linux-3.0.4/fs/hfsplus/catalog.c 2011-08-23 21:48:14.000000000 -0400
40738@@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block
40739 int err;
40740 u16 type;
40741
40742+ pax_track_stack();
40743+
40744 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
40745 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
40746 if (err)
40747@@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct
40748 int entry_size;
40749 int err;
40750
40751+ pax_track_stack();
40752+
40753 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
40754 str->name, cnid, inode->i_nlink);
40755 hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
40756@@ -349,6 +353,8 @@ int hfsplus_rename_cat(u32 cnid,
40757 int entry_size, type;
40758 int err = 0;
40759
40760+ pax_track_stack();
40761+
40762 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
40763 cnid, src_dir->i_ino, src_name->name,
40764 dst_dir->i_ino, dst_name->name);
40765diff -urNp linux-3.0.4/fs/hfsplus/dir.c linux-3.0.4/fs/hfsplus/dir.c
40766--- linux-3.0.4/fs/hfsplus/dir.c 2011-07-21 22:17:23.000000000 -0400
40767+++ linux-3.0.4/fs/hfsplus/dir.c 2011-08-23 21:48:14.000000000 -0400
40768@@ -129,6 +129,8 @@ static int hfsplus_readdir(struct file *
40769 struct hfsplus_readdir_data *rd;
40770 u16 type;
40771
40772+ pax_track_stack();
40773+
40774 if (filp->f_pos >= inode->i_size)
40775 return 0;
40776
40777diff -urNp linux-3.0.4/fs/hfsplus/inode.c linux-3.0.4/fs/hfsplus/inode.c
40778--- linux-3.0.4/fs/hfsplus/inode.c 2011-07-21 22:17:23.000000000 -0400
40779+++ linux-3.0.4/fs/hfsplus/inode.c 2011-08-23 21:48:14.000000000 -0400
40780@@ -489,6 +489,8 @@ int hfsplus_cat_read_inode(struct inode
40781 int res = 0;
40782 u16 type;
40783
40784+ pax_track_stack();
40785+
40786 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
40787
40788 HFSPLUS_I(inode)->linkid = 0;
40789@@ -552,6 +554,8 @@ int hfsplus_cat_write_inode(struct inode
40790 struct hfs_find_data fd;
40791 hfsplus_cat_entry entry;
40792
40793+ pax_track_stack();
40794+
40795 if (HFSPLUS_IS_RSRC(inode))
40796 main_inode = HFSPLUS_I(inode)->rsrc_inode;
40797
40798diff -urNp linux-3.0.4/fs/hfsplus/ioctl.c linux-3.0.4/fs/hfsplus/ioctl.c
40799--- linux-3.0.4/fs/hfsplus/ioctl.c 2011-07-21 22:17:23.000000000 -0400
40800+++ linux-3.0.4/fs/hfsplus/ioctl.c 2011-08-23 21:48:14.000000000 -0400
40801@@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dent
40802 struct hfsplus_cat_file *file;
40803 int res;
40804
40805+ pax_track_stack();
40806+
40807 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
40808 return -EOPNOTSUPP;
40809
40810@@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *
40811 struct hfsplus_cat_file *file;
40812 ssize_t res = 0;
40813
40814+ pax_track_stack();
40815+
40816 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
40817 return -EOPNOTSUPP;
40818
40819diff -urNp linux-3.0.4/fs/hfsplus/super.c linux-3.0.4/fs/hfsplus/super.c
40820--- linux-3.0.4/fs/hfsplus/super.c 2011-07-21 22:17:23.000000000 -0400
40821+++ linux-3.0.4/fs/hfsplus/super.c 2011-08-23 21:48:14.000000000 -0400
40822@@ -340,6 +340,8 @@ static int hfsplus_fill_super(struct sup
40823 struct nls_table *nls = NULL;
40824 int err;
40825
40826+ pax_track_stack();
40827+
40828 err = -EINVAL;
40829 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
40830 if (!sbi)
40831diff -urNp linux-3.0.4/fs/hugetlbfs/inode.c linux-3.0.4/fs/hugetlbfs/inode.c
40832--- linux-3.0.4/fs/hugetlbfs/inode.c 2011-07-21 22:17:23.000000000 -0400
40833+++ linux-3.0.4/fs/hugetlbfs/inode.c 2011-08-23 21:48:14.000000000 -0400
40834@@ -914,7 +914,7 @@ static struct file_system_type hugetlbfs
40835 .kill_sb = kill_litter_super,
40836 };
40837
40838-static struct vfsmount *hugetlbfs_vfsmount;
40839+struct vfsmount *hugetlbfs_vfsmount;
40840
40841 static int can_do_hugetlb_shm(void)
40842 {
40843diff -urNp linux-3.0.4/fs/inode.c linux-3.0.4/fs/inode.c
40844--- linux-3.0.4/fs/inode.c 2011-07-21 22:17:23.000000000 -0400
40845+++ linux-3.0.4/fs/inode.c 2011-08-23 21:47:56.000000000 -0400
40846@@ -829,8 +829,8 @@ unsigned int get_next_ino(void)
40847
40848 #ifdef CONFIG_SMP
40849 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
40850- static atomic_t shared_last_ino;
40851- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
40852+ static atomic_unchecked_t shared_last_ino;
40853+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
40854
40855 res = next - LAST_INO_BATCH;
40856 }
40857diff -urNp linux-3.0.4/fs/jbd/checkpoint.c linux-3.0.4/fs/jbd/checkpoint.c
40858--- linux-3.0.4/fs/jbd/checkpoint.c 2011-07-21 22:17:23.000000000 -0400
40859+++ linux-3.0.4/fs/jbd/checkpoint.c 2011-08-23 21:48:14.000000000 -0400
40860@@ -350,6 +350,8 @@ int log_do_checkpoint(journal_t *journal
40861 tid_t this_tid;
40862 int result;
40863
40864+ pax_track_stack();
40865+
40866 jbd_debug(1, "Start checkpoint\n");
40867
40868 /*
40869diff -urNp linux-3.0.4/fs/jffs2/compr_rtime.c linux-3.0.4/fs/jffs2/compr_rtime.c
40870--- linux-3.0.4/fs/jffs2/compr_rtime.c 2011-07-21 22:17:23.000000000 -0400
40871+++ linux-3.0.4/fs/jffs2/compr_rtime.c 2011-08-23 21:48:14.000000000 -0400
40872@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
40873 int outpos = 0;
40874 int pos=0;
40875
40876+ pax_track_stack();
40877+
40878 memset(positions,0,sizeof(positions));
40879
40880 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
40881@@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsign
40882 int outpos = 0;
40883 int pos=0;
40884
40885+ pax_track_stack();
40886+
40887 memset(positions,0,sizeof(positions));
40888
40889 while (outpos<destlen) {
40890diff -urNp linux-3.0.4/fs/jffs2/compr_rubin.c linux-3.0.4/fs/jffs2/compr_rubin.c
40891--- linux-3.0.4/fs/jffs2/compr_rubin.c 2011-07-21 22:17:23.000000000 -0400
40892+++ linux-3.0.4/fs/jffs2/compr_rubin.c 2011-08-23 21:48:14.000000000 -0400
40893@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
40894 int ret;
40895 uint32_t mysrclen, mydstlen;
40896
40897+ pax_track_stack();
40898+
40899 mysrclen = *sourcelen;
40900 mydstlen = *dstlen - 8;
40901
40902diff -urNp linux-3.0.4/fs/jffs2/erase.c linux-3.0.4/fs/jffs2/erase.c
40903--- linux-3.0.4/fs/jffs2/erase.c 2011-07-21 22:17:23.000000000 -0400
40904+++ linux-3.0.4/fs/jffs2/erase.c 2011-08-23 21:47:56.000000000 -0400
40905@@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(stru
40906 struct jffs2_unknown_node marker = {
40907 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
40908 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
40909- .totlen = cpu_to_je32(c->cleanmarker_size)
40910+ .totlen = cpu_to_je32(c->cleanmarker_size),
40911+ .hdr_crc = cpu_to_je32(0)
40912 };
40913
40914 jffs2_prealloc_raw_node_refs(c, jeb, 1);
40915diff -urNp linux-3.0.4/fs/jffs2/wbuf.c linux-3.0.4/fs/jffs2/wbuf.c
40916--- linux-3.0.4/fs/jffs2/wbuf.c 2011-07-21 22:17:23.000000000 -0400
40917+++ linux-3.0.4/fs/jffs2/wbuf.c 2011-08-23 21:47:56.000000000 -0400
40918@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
40919 {
40920 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
40921 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
40922- .totlen = constant_cpu_to_je32(8)
40923+ .totlen = constant_cpu_to_je32(8),
40924+ .hdr_crc = constant_cpu_to_je32(0)
40925 };
40926
40927 /*
40928diff -urNp linux-3.0.4/fs/jffs2/xattr.c linux-3.0.4/fs/jffs2/xattr.c
40929--- linux-3.0.4/fs/jffs2/xattr.c 2011-07-21 22:17:23.000000000 -0400
40930+++ linux-3.0.4/fs/jffs2/xattr.c 2011-08-23 21:48:14.000000000 -0400
40931@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
40932
40933 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
40934
40935+ pax_track_stack();
40936+
40937 /* Phase.1 : Merge same xref */
40938 for (i=0; i < XREF_TMPHASH_SIZE; i++)
40939 xref_tmphash[i] = NULL;
40940diff -urNp linux-3.0.4/fs/jfs/super.c linux-3.0.4/fs/jfs/super.c
40941--- linux-3.0.4/fs/jfs/super.c 2011-07-21 22:17:23.000000000 -0400
40942+++ linux-3.0.4/fs/jfs/super.c 2011-08-23 21:47:56.000000000 -0400
40943@@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
40944
40945 jfs_inode_cachep =
40946 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
40947- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
40948+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
40949 init_once);
40950 if (jfs_inode_cachep == NULL)
40951 return -ENOMEM;
40952diff -urNp linux-3.0.4/fs/Kconfig.binfmt linux-3.0.4/fs/Kconfig.binfmt
40953--- linux-3.0.4/fs/Kconfig.binfmt 2011-07-21 22:17:23.000000000 -0400
40954+++ linux-3.0.4/fs/Kconfig.binfmt 2011-08-23 21:47:56.000000000 -0400
40955@@ -86,7 +86,7 @@ config HAVE_AOUT
40956
40957 config BINFMT_AOUT
40958 tristate "Kernel support for a.out and ECOFF binaries"
40959- depends on HAVE_AOUT
40960+ depends on HAVE_AOUT && BROKEN
40961 ---help---
40962 A.out (Assembler.OUTput) is a set of formats for libraries and
40963 executables used in the earliest versions of UNIX. Linux used
40964diff -urNp linux-3.0.4/fs/libfs.c linux-3.0.4/fs/libfs.c
40965--- linux-3.0.4/fs/libfs.c 2011-07-21 22:17:23.000000000 -0400
40966+++ linux-3.0.4/fs/libfs.c 2011-08-23 21:47:56.000000000 -0400
40967@@ -163,6 +163,9 @@ int dcache_readdir(struct file * filp, v
40968
40969 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
40970 struct dentry *next;
40971+ char d_name[sizeof(next->d_iname)];
40972+ const unsigned char *name;
40973+
40974 next = list_entry(p, struct dentry, d_u.d_child);
40975 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
40976 if (!simple_positive(next)) {
40977@@ -172,7 +175,12 @@ int dcache_readdir(struct file * filp, v
40978
40979 spin_unlock(&next->d_lock);
40980 spin_unlock(&dentry->d_lock);
40981- if (filldir(dirent, next->d_name.name,
40982+ name = next->d_name.name;
40983+ if (name == next->d_iname) {
40984+ memcpy(d_name, name, next->d_name.len);
40985+ name = d_name;
40986+ }
40987+ if (filldir(dirent, name,
40988 next->d_name.len, filp->f_pos,
40989 next->d_inode->i_ino,
40990 dt_type(next->d_inode)) < 0)
40991diff -urNp linux-3.0.4/fs/lockd/clntproc.c linux-3.0.4/fs/lockd/clntproc.c
40992--- linux-3.0.4/fs/lockd/clntproc.c 2011-07-21 22:17:23.000000000 -0400
40993+++ linux-3.0.4/fs/lockd/clntproc.c 2011-08-23 21:48:14.000000000 -0400
40994@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
40995 /*
40996 * Cookie counter for NLM requests
40997 */
40998-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
40999+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
41000
41001 void nlmclnt_next_cookie(struct nlm_cookie *c)
41002 {
41003- u32 cookie = atomic_inc_return(&nlm_cookie);
41004+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
41005
41006 memcpy(c->data, &cookie, 4);
41007 c->len=4;
41008@@ -620,6 +620,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
41009 struct nlm_rqst reqst, *req;
41010 int status;
41011
41012+ pax_track_stack();
41013+
41014 req = &reqst;
41015 memset(req, 0, sizeof(*req));
41016 locks_init_lock(&req->a_args.lock.fl);
41017diff -urNp linux-3.0.4/fs/locks.c linux-3.0.4/fs/locks.c
41018--- linux-3.0.4/fs/locks.c 2011-07-21 22:17:23.000000000 -0400
41019+++ linux-3.0.4/fs/locks.c 2011-08-23 21:47:56.000000000 -0400
41020@@ -2043,16 +2043,16 @@ void locks_remove_flock(struct file *fil
41021 return;
41022
41023 if (filp->f_op && filp->f_op->flock) {
41024- struct file_lock fl = {
41025+ struct file_lock flock = {
41026 .fl_pid = current->tgid,
41027 .fl_file = filp,
41028 .fl_flags = FL_FLOCK,
41029 .fl_type = F_UNLCK,
41030 .fl_end = OFFSET_MAX,
41031 };
41032- filp->f_op->flock(filp, F_SETLKW, &fl);
41033- if (fl.fl_ops && fl.fl_ops->fl_release_private)
41034- fl.fl_ops->fl_release_private(&fl);
41035+ filp->f_op->flock(filp, F_SETLKW, &flock);
41036+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
41037+ flock.fl_ops->fl_release_private(&flock);
41038 }
41039
41040 lock_flocks();
41041diff -urNp linux-3.0.4/fs/logfs/super.c linux-3.0.4/fs/logfs/super.c
41042--- linux-3.0.4/fs/logfs/super.c 2011-07-21 22:17:23.000000000 -0400
41043+++ linux-3.0.4/fs/logfs/super.c 2011-08-23 21:48:14.000000000 -0400
41044@@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super
41045 struct logfs_disk_super _ds1, *ds1 = &_ds1;
41046 int err, valid0, valid1;
41047
41048+ pax_track_stack();
41049+
41050 /* read first superblock */
41051 err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
41052 if (err)
41053diff -urNp linux-3.0.4/fs/namei.c linux-3.0.4/fs/namei.c
41054--- linux-3.0.4/fs/namei.c 2011-07-21 22:17:23.000000000 -0400
41055+++ linux-3.0.4/fs/namei.c 2011-08-23 21:48:14.000000000 -0400
41056@@ -237,21 +237,31 @@ int generic_permission(struct inode *ino
41057 return ret;
41058
41059 /*
41060- * Read/write DACs are always overridable.
41061- * Executable DACs are overridable for all directories and
41062- * for non-directories that have least one exec bit set.
41063+ * Searching includes executable on directories, else just read.
41064 */
41065- if (!(mask & MAY_EXEC) || execute_ok(inode))
41066- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
41067+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
41068+ if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) {
41069+#ifdef CONFIG_GRKERNSEC
41070+ if (flags & IPERM_FLAG_RCU)
41071+ return -ECHILD;
41072+#endif
41073+ if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
41074 return 0;
41075+ }
41076
41077 /*
41078- * Searching includes executable on directories, else just read.
41079+ * Read/write DACs are always overridable.
41080+ * Executable DACs are overridable for all directories and
41081+ * for non-directories that have least one exec bit set.
41082 */
41083- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
41084- if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
41085- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
41086+ if (!(mask & MAY_EXEC) || execute_ok(inode)) {
41087+#ifdef CONFIG_GRKERNSEC
41088+ if (flags & IPERM_FLAG_RCU)
41089+ return -ECHILD;
41090+#endif
41091+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
41092 return 0;
41093+ }
41094
41095 return -EACCES;
41096 }
41097@@ -547,6 +557,9 @@ static int complete_walk(struct nameidat
41098 br_read_unlock(vfsmount_lock);
41099 }
41100
41101+ if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
41102+ return -ENOENT;
41103+
41104 if (likely(!(nd->flags & LOOKUP_JUMPED)))
41105 return 0;
41106
41107@@ -593,9 +606,16 @@ static inline int exec_permission(struct
41108 if (ret == -ECHILD)
41109 return ret;
41110
41111- if (ns_capable(ns, CAP_DAC_OVERRIDE) ||
41112- ns_capable(ns, CAP_DAC_READ_SEARCH))
41113+ if (ns_capable_nolog(ns, CAP_DAC_OVERRIDE))
41114 goto ok;
41115+ else {
41116+#ifdef CONFIG_GRKERNSEC
41117+ if (flags & IPERM_FLAG_RCU)
41118+ return -ECHILD;
41119+#endif
41120+ if (ns_capable(ns, CAP_DAC_READ_SEARCH) || ns_capable(ns, CAP_DAC_OVERRIDE))
41121+ goto ok;
41122+ }
41123
41124 return ret;
41125 ok:
41126@@ -703,11 +723,19 @@ follow_link(struct path *link, struct na
41127 return error;
41128 }
41129
41130+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
41131+ dentry->d_inode, dentry, nd->path.mnt)) {
41132+ error = -EACCES;
41133+ *p = ERR_PTR(error); /* no ->put_link(), please */
41134+ path_put(&nd->path);
41135+ return error;
41136+ }
41137+
41138 nd->last_type = LAST_BIND;
41139 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
41140 error = PTR_ERR(*p);
41141 if (!IS_ERR(*p)) {
41142- char *s = nd_get_link(nd);
41143+ const char *s = nd_get_link(nd);
41144 error = 0;
41145 if (s)
41146 error = __vfs_follow_link(nd, s);
41147@@ -1625,6 +1653,9 @@ static int do_path_lookup(int dfd, const
41148 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
41149
41150 if (likely(!retval)) {
41151+ if (*name != '/' && nd->path.dentry && nd->inode && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
41152+ return -ENOENT;
41153+
41154 if (unlikely(!audit_dummy_context())) {
41155 if (nd->path.dentry && nd->inode)
41156 audit_inode(name, nd->path.dentry);
41157@@ -1935,6 +1966,30 @@ int vfs_create(struct inode *dir, struct
41158 return error;
41159 }
41160
41161+/*
41162+ * Note that while the flag value (low two bits) for sys_open means:
41163+ * 00 - read-only
41164+ * 01 - write-only
41165+ * 10 - read-write
41166+ * 11 - special
41167+ * it is changed into
41168+ * 00 - no permissions needed
41169+ * 01 - read-permission
41170+ * 10 - write-permission
41171+ * 11 - read-write
41172+ * for the internal routines (ie open_namei()/follow_link() etc)
41173+ * This is more logical, and also allows the 00 "no perm needed"
41174+ * to be used for symlinks (where the permissions are checked
41175+ * later).
41176+ *
41177+*/
41178+static inline int open_to_namei_flags(int flag)
41179+{
41180+ if ((flag+1) & O_ACCMODE)
41181+ flag++;
41182+ return flag;
41183+}
41184+
41185 static int may_open(struct path *path, int acc_mode, int flag)
41186 {
41187 struct dentry *dentry = path->dentry;
41188@@ -1987,7 +2042,27 @@ static int may_open(struct path *path, i
41189 /*
41190 * Ensure there are no outstanding leases on the file.
41191 */
41192- return break_lease(inode, flag);
41193+ error = break_lease(inode, flag);
41194+
41195+ if (error)
41196+ return error;
41197+
41198+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
41199+ error = -EPERM;
41200+ goto exit;
41201+ }
41202+
41203+ if (gr_handle_rawio(inode)) {
41204+ error = -EPERM;
41205+ goto exit;
41206+ }
41207+
41208+ if (!gr_acl_handle_open(dentry, path->mnt, open_to_namei_flags(flag))) {
41209+ error = -EACCES;
41210+ goto exit;
41211+ }
41212+exit:
41213+ return error;
41214 }
41215
41216 static int handle_truncate(struct file *filp)
41217@@ -2013,30 +2088,6 @@ static int handle_truncate(struct file *
41218 }
41219
41220 /*
41221- * Note that while the flag value (low two bits) for sys_open means:
41222- * 00 - read-only
41223- * 01 - write-only
41224- * 10 - read-write
41225- * 11 - special
41226- * it is changed into
41227- * 00 - no permissions needed
41228- * 01 - read-permission
41229- * 10 - write-permission
41230- * 11 - read-write
41231- * for the internal routines (ie open_namei()/follow_link() etc)
41232- * This is more logical, and also allows the 00 "no perm needed"
41233- * to be used for symlinks (where the permissions are checked
41234- * later).
41235- *
41236-*/
41237-static inline int open_to_namei_flags(int flag)
41238-{
41239- if ((flag+1) & O_ACCMODE)
41240- flag++;
41241- return flag;
41242-}
41243-
41244-/*
41245 * Handle the last step of open()
41246 */
41247 static struct file *do_last(struct nameidata *nd, struct path *path,
41248@@ -2045,6 +2096,7 @@ static struct file *do_last(struct namei
41249 struct dentry *dir = nd->path.dentry;
41250 struct dentry *dentry;
41251 int open_flag = op->open_flag;
41252+ int flag = open_to_namei_flags(open_flag);
41253 int will_truncate = open_flag & O_TRUNC;
41254 int want_write = 0;
41255 int acc_mode = op->acc_mode;
41256@@ -2132,6 +2184,12 @@ static struct file *do_last(struct namei
41257 /* Negative dentry, just create the file */
41258 if (!dentry->d_inode) {
41259 int mode = op->mode;
41260+
41261+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, flag, mode)) {
41262+ error = -EACCES;
41263+ goto exit_mutex_unlock;
41264+ }
41265+
41266 if (!IS_POSIXACL(dir->d_inode))
41267 mode &= ~current_umask();
41268 /*
41269@@ -2155,6 +2213,8 @@ static struct file *do_last(struct namei
41270 error = vfs_create(dir->d_inode, dentry, mode, nd);
41271 if (error)
41272 goto exit_mutex_unlock;
41273+ else
41274+ gr_handle_create(path->dentry, path->mnt);
41275 mutex_unlock(&dir->d_inode->i_mutex);
41276 dput(nd->path.dentry);
41277 nd->path.dentry = dentry;
41278@@ -2164,6 +2224,14 @@ static struct file *do_last(struct namei
41279 /*
41280 * It already exists.
41281 */
41282+
41283+ /* only check if O_CREAT is specified, all other checks need to go
41284+ into may_open */
41285+ if (gr_handle_fifo(path->dentry, path->mnt, dir, flag, acc_mode)) {
41286+ error = -EACCES;
41287+ goto exit_mutex_unlock;
41288+ }
41289+
41290 mutex_unlock(&dir->d_inode->i_mutex);
41291 audit_inode(pathname, path->dentry);
41292
41293@@ -2450,6 +2518,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
41294 error = may_mknod(mode);
41295 if (error)
41296 goto out_dput;
41297+
41298+ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
41299+ error = -EPERM;
41300+ goto out_dput;
41301+ }
41302+
41303+ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
41304+ error = -EACCES;
41305+ goto out_dput;
41306+ }
41307+
41308 error = mnt_want_write(nd.path.mnt);
41309 if (error)
41310 goto out_dput;
41311@@ -2470,6 +2549,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
41312 }
41313 out_drop_write:
41314 mnt_drop_write(nd.path.mnt);
41315+
41316+ if (!error)
41317+ gr_handle_create(dentry, nd.path.mnt);
41318 out_dput:
41319 dput(dentry);
41320 out_unlock:
41321@@ -2522,6 +2604,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
41322 if (IS_ERR(dentry))
41323 goto out_unlock;
41324
41325+ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
41326+ error = -EACCES;
41327+ goto out_dput;
41328+ }
41329+
41330 if (!IS_POSIXACL(nd.path.dentry->d_inode))
41331 mode &= ~current_umask();
41332 error = mnt_want_write(nd.path.mnt);
41333@@ -2533,6 +2620,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
41334 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
41335 out_drop_write:
41336 mnt_drop_write(nd.path.mnt);
41337+
41338+ if (!error)
41339+ gr_handle_create(dentry, nd.path.mnt);
41340+
41341 out_dput:
41342 dput(dentry);
41343 out_unlock:
41344@@ -2613,6 +2704,8 @@ static long do_rmdir(int dfd, const char
41345 char * name;
41346 struct dentry *dentry;
41347 struct nameidata nd;
41348+ ino_t saved_ino = 0;
41349+ dev_t saved_dev = 0;
41350
41351 error = user_path_parent(dfd, pathname, &nd, &name);
41352 if (error)
41353@@ -2641,6 +2734,17 @@ static long do_rmdir(int dfd, const char
41354 error = -ENOENT;
41355 goto exit3;
41356 }
41357+
41358+ if (dentry->d_inode->i_nlink <= 1) {
41359+ saved_ino = dentry->d_inode->i_ino;
41360+ saved_dev = gr_get_dev_from_dentry(dentry);
41361+ }
41362+
41363+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
41364+ error = -EACCES;
41365+ goto exit3;
41366+ }
41367+
41368 error = mnt_want_write(nd.path.mnt);
41369 if (error)
41370 goto exit3;
41371@@ -2648,6 +2752,8 @@ static long do_rmdir(int dfd, const char
41372 if (error)
41373 goto exit4;
41374 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
41375+ if (!error && (saved_dev || saved_ino))
41376+ gr_handle_delete(saved_ino, saved_dev);
41377 exit4:
41378 mnt_drop_write(nd.path.mnt);
41379 exit3:
41380@@ -2710,6 +2816,8 @@ static long do_unlinkat(int dfd, const c
41381 struct dentry *dentry;
41382 struct nameidata nd;
41383 struct inode *inode = NULL;
41384+ ino_t saved_ino = 0;
41385+ dev_t saved_dev = 0;
41386
41387 error = user_path_parent(dfd, pathname, &nd, &name);
41388 if (error)
41389@@ -2732,6 +2840,16 @@ static long do_unlinkat(int dfd, const c
41390 if (!inode)
41391 goto slashes;
41392 ihold(inode);
41393+
41394+ if (inode->i_nlink <= 1) {
41395+ saved_ino = inode->i_ino;
41396+ saved_dev = gr_get_dev_from_dentry(dentry);
41397+ }
41398+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
41399+ error = -EACCES;
41400+ goto exit2;
41401+ }
41402+
41403 error = mnt_want_write(nd.path.mnt);
41404 if (error)
41405 goto exit2;
41406@@ -2739,6 +2857,8 @@ static long do_unlinkat(int dfd, const c
41407 if (error)
41408 goto exit3;
41409 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
41410+ if (!error && (saved_ino || saved_dev))
41411+ gr_handle_delete(saved_ino, saved_dev);
41412 exit3:
41413 mnt_drop_write(nd.path.mnt);
41414 exit2:
41415@@ -2816,6 +2936,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
41416 if (IS_ERR(dentry))
41417 goto out_unlock;
41418
41419+ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
41420+ error = -EACCES;
41421+ goto out_dput;
41422+ }
41423+
41424 error = mnt_want_write(nd.path.mnt);
41425 if (error)
41426 goto out_dput;
41427@@ -2823,6 +2948,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
41428 if (error)
41429 goto out_drop_write;
41430 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
41431+ if (!error)
41432+ gr_handle_create(dentry, nd.path.mnt);
41433 out_drop_write:
41434 mnt_drop_write(nd.path.mnt);
41435 out_dput:
41436@@ -2931,6 +3058,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
41437 error = PTR_ERR(new_dentry);
41438 if (IS_ERR(new_dentry))
41439 goto out_unlock;
41440+
41441+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
41442+ old_path.dentry->d_inode,
41443+ old_path.dentry->d_inode->i_mode, to)) {
41444+ error = -EACCES;
41445+ goto out_dput;
41446+ }
41447+
41448+ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
41449+ old_path.dentry, old_path.mnt, to)) {
41450+ error = -EACCES;
41451+ goto out_dput;
41452+ }
41453+
41454 error = mnt_want_write(nd.path.mnt);
41455 if (error)
41456 goto out_dput;
41457@@ -2938,6 +3079,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
41458 if (error)
41459 goto out_drop_write;
41460 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
41461+ if (!error)
41462+ gr_handle_create(new_dentry, nd.path.mnt);
41463 out_drop_write:
41464 mnt_drop_write(nd.path.mnt);
41465 out_dput:
41466@@ -3113,6 +3256,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
41467 char *to;
41468 int error;
41469
41470+ pax_track_stack();
41471+
41472 error = user_path_parent(olddfd, oldname, &oldnd, &from);
41473 if (error)
41474 goto exit;
41475@@ -3169,6 +3314,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
41476 if (new_dentry == trap)
41477 goto exit5;
41478
41479+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
41480+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
41481+ to);
41482+ if (error)
41483+ goto exit5;
41484+
41485 error = mnt_want_write(oldnd.path.mnt);
41486 if (error)
41487 goto exit5;
41488@@ -3178,6 +3329,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
41489 goto exit6;
41490 error = vfs_rename(old_dir->d_inode, old_dentry,
41491 new_dir->d_inode, new_dentry);
41492+ if (!error)
41493+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
41494+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
41495 exit6:
41496 mnt_drop_write(oldnd.path.mnt);
41497 exit5:
41498@@ -3203,6 +3357,8 @@ SYSCALL_DEFINE2(rename, const char __use
41499
41500 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
41501 {
41502+ char tmpbuf[64];
41503+ const char *newlink;
41504 int len;
41505
41506 len = PTR_ERR(link);
41507@@ -3212,7 +3368,14 @@ int vfs_readlink(struct dentry *dentry,
41508 len = strlen(link);
41509 if (len > (unsigned) buflen)
41510 len = buflen;
41511- if (copy_to_user(buffer, link, len))
41512+
41513+ if (len < sizeof(tmpbuf)) {
41514+ memcpy(tmpbuf, link, len);
41515+ newlink = tmpbuf;
41516+ } else
41517+ newlink = link;
41518+
41519+ if (copy_to_user(buffer, newlink, len))
41520 len = -EFAULT;
41521 out:
41522 return len;
41523diff -urNp linux-3.0.4/fs/namespace.c linux-3.0.4/fs/namespace.c
41524--- linux-3.0.4/fs/namespace.c 2011-07-21 22:17:23.000000000 -0400
41525+++ linux-3.0.4/fs/namespace.c 2011-08-23 21:48:14.000000000 -0400
41526@@ -1328,6 +1328,9 @@ static int do_umount(struct vfsmount *mn
41527 if (!(sb->s_flags & MS_RDONLY))
41528 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
41529 up_write(&sb->s_umount);
41530+
41531+ gr_log_remount(mnt->mnt_devname, retval);
41532+
41533 return retval;
41534 }
41535
41536@@ -1347,6 +1350,9 @@ static int do_umount(struct vfsmount *mn
41537 br_write_unlock(vfsmount_lock);
41538 up_write(&namespace_sem);
41539 release_mounts(&umount_list);
41540+
41541+ gr_log_unmount(mnt->mnt_devname, retval);
41542+
41543 return retval;
41544 }
41545
41546@@ -2338,6 +2344,16 @@ long do_mount(char *dev_name, char *dir_
41547 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
41548 MS_STRICTATIME);
41549
41550+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
41551+ retval = -EPERM;
41552+ goto dput_out;
41553+ }
41554+
41555+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
41556+ retval = -EPERM;
41557+ goto dput_out;
41558+ }
41559+
41560 if (flags & MS_REMOUNT)
41561 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
41562 data_page);
41563@@ -2352,6 +2368,9 @@ long do_mount(char *dev_name, char *dir_
41564 dev_name, data_page);
41565 dput_out:
41566 path_put(&path);
41567+
41568+ gr_log_mount(dev_name, dir_name, retval);
41569+
41570 return retval;
41571 }
41572
41573@@ -2575,6 +2594,11 @@ SYSCALL_DEFINE2(pivot_root, const char _
41574 if (error)
41575 goto out2;
41576
41577+ if (gr_handle_chroot_pivot()) {
41578+ error = -EPERM;
41579+ goto out2;
41580+ }
41581+
41582 get_fs_root(current->fs, &root);
41583 error = lock_mount(&old);
41584 if (error)
41585diff -urNp linux-3.0.4/fs/ncpfs/dir.c linux-3.0.4/fs/ncpfs/dir.c
41586--- linux-3.0.4/fs/ncpfs/dir.c 2011-07-21 22:17:23.000000000 -0400
41587+++ linux-3.0.4/fs/ncpfs/dir.c 2011-08-23 21:48:14.000000000 -0400
41588@@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentr
41589 int res, val = 0, len;
41590 __u8 __name[NCP_MAXPATHLEN + 1];
41591
41592+ pax_track_stack();
41593+
41594 if (dentry == dentry->d_sb->s_root)
41595 return 1;
41596
41597@@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct
41598 int error, res, len;
41599 __u8 __name[NCP_MAXPATHLEN + 1];
41600
41601+ pax_track_stack();
41602+
41603 error = -EIO;
41604 if (!ncp_conn_valid(server))
41605 goto finished;
41606@@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, st
41607 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
41608 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
41609
41610+ pax_track_stack();
41611+
41612 ncp_age_dentry(server, dentry);
41613 len = sizeof(__name);
41614 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
41615@@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir,
41616 int error, len;
41617 __u8 __name[NCP_MAXPATHLEN + 1];
41618
41619+ pax_track_stack();
41620+
41621 DPRINTK("ncp_mkdir: making %s/%s\n",
41622 dentry->d_parent->d_name.name, dentry->d_name.name);
41623
41624@@ -1140,6 +1148,8 @@ static int ncp_rename(struct inode *old_
41625 int old_len, new_len;
41626 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
41627
41628+ pax_track_stack();
41629+
41630 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
41631 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
41632 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
41633diff -urNp linux-3.0.4/fs/ncpfs/inode.c linux-3.0.4/fs/ncpfs/inode.c
41634--- linux-3.0.4/fs/ncpfs/inode.c 2011-07-21 22:17:23.000000000 -0400
41635+++ linux-3.0.4/fs/ncpfs/inode.c 2011-08-23 21:48:14.000000000 -0400
41636@@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_b
41637 #endif
41638 struct ncp_entry_info finfo;
41639
41640+ pax_track_stack();
41641+
41642 memset(&data, 0, sizeof(data));
41643 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
41644 if (!server)
41645diff -urNp linux-3.0.4/fs/nfs/inode.c linux-3.0.4/fs/nfs/inode.c
41646--- linux-3.0.4/fs/nfs/inode.c 2011-07-21 22:17:23.000000000 -0400
41647+++ linux-3.0.4/fs/nfs/inode.c 2011-08-23 21:47:56.000000000 -0400
41648@@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct
41649 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
41650 nfsi->attrtimeo_timestamp = jiffies;
41651
41652- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
41653+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
41654 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
41655 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
41656 else
41657@@ -1000,16 +1000,16 @@ static int nfs_size_need_update(const st
41658 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
41659 }
41660
41661-static atomic_long_t nfs_attr_generation_counter;
41662+static atomic_long_unchecked_t nfs_attr_generation_counter;
41663
41664 static unsigned long nfs_read_attr_generation_counter(void)
41665 {
41666- return atomic_long_read(&nfs_attr_generation_counter);
41667+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
41668 }
41669
41670 unsigned long nfs_inc_attr_generation_counter(void)
41671 {
41672- return atomic_long_inc_return(&nfs_attr_generation_counter);
41673+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
41674 }
41675
41676 void nfs_fattr_init(struct nfs_fattr *fattr)
41677diff -urNp linux-3.0.4/fs/nfsd/nfs4state.c linux-3.0.4/fs/nfsd/nfs4state.c
41678--- linux-3.0.4/fs/nfsd/nfs4state.c 2011-08-23 21:44:40.000000000 -0400
41679+++ linux-3.0.4/fs/nfsd/nfs4state.c 2011-08-23 21:48:14.000000000 -0400
41680@@ -3794,6 +3794,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
41681 unsigned int strhashval;
41682 int err;
41683
41684+ pax_track_stack();
41685+
41686 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
41687 (long long) lock->lk_offset,
41688 (long long) lock->lk_length);
41689diff -urNp linux-3.0.4/fs/nfsd/nfs4xdr.c linux-3.0.4/fs/nfsd/nfs4xdr.c
41690--- linux-3.0.4/fs/nfsd/nfs4xdr.c 2011-07-21 22:17:23.000000000 -0400
41691+++ linux-3.0.4/fs/nfsd/nfs4xdr.c 2011-08-23 21:48:14.000000000 -0400
41692@@ -1788,6 +1788,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
41693 .dentry = dentry,
41694 };
41695
41696+ pax_track_stack();
41697+
41698 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
41699 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
41700 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
41701diff -urNp linux-3.0.4/fs/nfsd/vfs.c linux-3.0.4/fs/nfsd/vfs.c
41702--- linux-3.0.4/fs/nfsd/vfs.c 2011-07-21 22:17:23.000000000 -0400
41703+++ linux-3.0.4/fs/nfsd/vfs.c 2011-08-23 21:47:56.000000000 -0400
41704@@ -896,7 +896,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
41705 } else {
41706 oldfs = get_fs();
41707 set_fs(KERNEL_DS);
41708- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
41709+ host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
41710 set_fs(oldfs);
41711 }
41712
41713@@ -1000,7 +1000,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
41714
41715 /* Write the data. */
41716 oldfs = get_fs(); set_fs(KERNEL_DS);
41717- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
41718+ host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
41719 set_fs(oldfs);
41720 if (host_err < 0)
41721 goto out_nfserr;
41722@@ -1535,7 +1535,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
41723 */
41724
41725 oldfs = get_fs(); set_fs(KERNEL_DS);
41726- host_err = inode->i_op->readlink(dentry, buf, *lenp);
41727+ host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
41728 set_fs(oldfs);
41729
41730 if (host_err < 0)
41731diff -urNp linux-3.0.4/fs/notify/fanotify/fanotify_user.c linux-3.0.4/fs/notify/fanotify/fanotify_user.c
41732--- linux-3.0.4/fs/notify/fanotify/fanotify_user.c 2011-07-21 22:17:23.000000000 -0400
41733+++ linux-3.0.4/fs/notify/fanotify/fanotify_user.c 2011-08-23 21:48:14.000000000 -0400
41734@@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct
41735 goto out_close_fd;
41736
41737 ret = -EFAULT;
41738- if (copy_to_user(buf, &fanotify_event_metadata,
41739+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
41740+ copy_to_user(buf, &fanotify_event_metadata,
41741 fanotify_event_metadata.event_len))
41742 goto out_kill_access_response;
41743
41744diff -urNp linux-3.0.4/fs/notify/notification.c linux-3.0.4/fs/notify/notification.c
41745--- linux-3.0.4/fs/notify/notification.c 2011-07-21 22:17:23.000000000 -0400
41746+++ linux-3.0.4/fs/notify/notification.c 2011-08-23 21:47:56.000000000 -0400
41747@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
41748 * get set to 0 so it will never get 'freed'
41749 */
41750 static struct fsnotify_event *q_overflow_event;
41751-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
41752+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
41753
41754 /**
41755 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
41756@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
41757 */
41758 u32 fsnotify_get_cookie(void)
41759 {
41760- return atomic_inc_return(&fsnotify_sync_cookie);
41761+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
41762 }
41763 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
41764
41765diff -urNp linux-3.0.4/fs/ntfs/dir.c linux-3.0.4/fs/ntfs/dir.c
41766--- linux-3.0.4/fs/ntfs/dir.c 2011-07-21 22:17:23.000000000 -0400
41767+++ linux-3.0.4/fs/ntfs/dir.c 2011-08-23 21:47:56.000000000 -0400
41768@@ -1329,7 +1329,7 @@ find_next_index_buffer:
41769 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
41770 ~(s64)(ndir->itype.index.block_size - 1)));
41771 /* Bounds checks. */
41772- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
41773+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
41774 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
41775 "inode 0x%lx or driver bug.", vdir->i_ino);
41776 goto err_out;
41777diff -urNp linux-3.0.4/fs/ntfs/file.c linux-3.0.4/fs/ntfs/file.c
41778--- linux-3.0.4/fs/ntfs/file.c 2011-07-21 22:17:23.000000000 -0400
41779+++ linux-3.0.4/fs/ntfs/file.c 2011-08-23 21:47:56.000000000 -0400
41780@@ -2222,6 +2222,6 @@ const struct inode_operations ntfs_file_
41781 #endif /* NTFS_RW */
41782 };
41783
41784-const struct file_operations ntfs_empty_file_ops = {};
41785+const struct file_operations ntfs_empty_file_ops __read_only;
41786
41787-const struct inode_operations ntfs_empty_inode_ops = {};
41788+const struct inode_operations ntfs_empty_inode_ops __read_only;
41789diff -urNp linux-3.0.4/fs/ocfs2/localalloc.c linux-3.0.4/fs/ocfs2/localalloc.c
41790--- linux-3.0.4/fs/ocfs2/localalloc.c 2011-07-21 22:17:23.000000000 -0400
41791+++ linux-3.0.4/fs/ocfs2/localalloc.c 2011-08-23 21:47:56.000000000 -0400
41792@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_windo
41793 goto bail;
41794 }
41795
41796- atomic_inc(&osb->alloc_stats.moves);
41797+ atomic_inc_unchecked(&osb->alloc_stats.moves);
41798
41799 bail:
41800 if (handle)
41801diff -urNp linux-3.0.4/fs/ocfs2/namei.c linux-3.0.4/fs/ocfs2/namei.c
41802--- linux-3.0.4/fs/ocfs2/namei.c 2011-07-21 22:17:23.000000000 -0400
41803+++ linux-3.0.4/fs/ocfs2/namei.c 2011-08-23 21:48:14.000000000 -0400
41804@@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *ol
41805 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
41806 struct ocfs2_dir_lookup_result target_insert = { NULL, };
41807
41808+ pax_track_stack();
41809+
41810 /* At some point it might be nice to break this function up a
41811 * bit. */
41812
41813diff -urNp linux-3.0.4/fs/ocfs2/ocfs2.h linux-3.0.4/fs/ocfs2/ocfs2.h
41814--- linux-3.0.4/fs/ocfs2/ocfs2.h 2011-07-21 22:17:23.000000000 -0400
41815+++ linux-3.0.4/fs/ocfs2/ocfs2.h 2011-08-23 21:47:56.000000000 -0400
41816@@ -235,11 +235,11 @@ enum ocfs2_vol_state
41817
41818 struct ocfs2_alloc_stats
41819 {
41820- atomic_t moves;
41821- atomic_t local_data;
41822- atomic_t bitmap_data;
41823- atomic_t bg_allocs;
41824- atomic_t bg_extends;
41825+ atomic_unchecked_t moves;
41826+ atomic_unchecked_t local_data;
41827+ atomic_unchecked_t bitmap_data;
41828+ atomic_unchecked_t bg_allocs;
41829+ atomic_unchecked_t bg_extends;
41830 };
41831
41832 enum ocfs2_local_alloc_state
41833diff -urNp linux-3.0.4/fs/ocfs2/suballoc.c linux-3.0.4/fs/ocfs2/suballoc.c
41834--- linux-3.0.4/fs/ocfs2/suballoc.c 2011-07-21 22:17:23.000000000 -0400
41835+++ linux-3.0.4/fs/ocfs2/suballoc.c 2011-08-23 21:47:56.000000000 -0400
41836@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(s
41837 mlog_errno(status);
41838 goto bail;
41839 }
41840- atomic_inc(&osb->alloc_stats.bg_extends);
41841+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
41842
41843 /* You should never ask for this much metadata */
41844 BUG_ON(bits_wanted >
41845@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handl
41846 mlog_errno(status);
41847 goto bail;
41848 }
41849- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
41850+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
41851
41852 *suballoc_loc = res.sr_bg_blkno;
41853 *suballoc_bit_start = res.sr_bit_offset;
41854@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_
41855 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
41856 res->sr_bits);
41857
41858- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
41859+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
41860
41861 BUG_ON(res->sr_bits != 1);
41862
41863@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *hand
41864 mlog_errno(status);
41865 goto bail;
41866 }
41867- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
41868+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
41869
41870 BUG_ON(res.sr_bits != 1);
41871
41872@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *han
41873 cluster_start,
41874 num_clusters);
41875 if (!status)
41876- atomic_inc(&osb->alloc_stats.local_data);
41877+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
41878 } else {
41879 if (min_clusters > (osb->bitmap_cpg - 1)) {
41880 /* The only paths asking for contiguousness
41881@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *han
41882 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
41883 res.sr_bg_blkno,
41884 res.sr_bit_offset);
41885- atomic_inc(&osb->alloc_stats.bitmap_data);
41886+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
41887 *num_clusters = res.sr_bits;
41888 }
41889 }
41890diff -urNp linux-3.0.4/fs/ocfs2/super.c linux-3.0.4/fs/ocfs2/super.c
41891--- linux-3.0.4/fs/ocfs2/super.c 2011-07-21 22:17:23.000000000 -0400
41892+++ linux-3.0.4/fs/ocfs2/super.c 2011-08-23 21:47:56.000000000 -0400
41893@@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
41894 "%10s => GlobalAllocs: %d LocalAllocs: %d "
41895 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
41896 "Stats",
41897- atomic_read(&osb->alloc_stats.bitmap_data),
41898- atomic_read(&osb->alloc_stats.local_data),
41899- atomic_read(&osb->alloc_stats.bg_allocs),
41900- atomic_read(&osb->alloc_stats.moves),
41901- atomic_read(&osb->alloc_stats.bg_extends));
41902+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
41903+ atomic_read_unchecked(&osb->alloc_stats.local_data),
41904+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
41905+ atomic_read_unchecked(&osb->alloc_stats.moves),
41906+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
41907
41908 out += snprintf(buf + out, len - out,
41909 "%10s => State: %u Descriptor: %llu Size: %u bits "
41910@@ -2112,11 +2112,11 @@ static int ocfs2_initialize_super(struct
41911 spin_lock_init(&osb->osb_xattr_lock);
41912 ocfs2_init_steal_slots(osb);
41913
41914- atomic_set(&osb->alloc_stats.moves, 0);
41915- atomic_set(&osb->alloc_stats.local_data, 0);
41916- atomic_set(&osb->alloc_stats.bitmap_data, 0);
41917- atomic_set(&osb->alloc_stats.bg_allocs, 0);
41918- atomic_set(&osb->alloc_stats.bg_extends, 0);
41919+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
41920+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
41921+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
41922+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
41923+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
41924
41925 /* Copy the blockcheck stats from the superblock probe */
41926 osb->osb_ecc_stats = *stats;
41927diff -urNp linux-3.0.4/fs/ocfs2/symlink.c linux-3.0.4/fs/ocfs2/symlink.c
41928--- linux-3.0.4/fs/ocfs2/symlink.c 2011-07-21 22:17:23.000000000 -0400
41929+++ linux-3.0.4/fs/ocfs2/symlink.c 2011-08-23 21:47:56.000000000 -0400
41930@@ -142,7 +142,7 @@ bail:
41931
41932 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
41933 {
41934- char *link = nd_get_link(nd);
41935+ const char *link = nd_get_link(nd);
41936 if (!IS_ERR(link))
41937 kfree(link);
41938 }
41939diff -urNp linux-3.0.4/fs/open.c linux-3.0.4/fs/open.c
41940--- linux-3.0.4/fs/open.c 2011-07-21 22:17:23.000000000 -0400
41941+++ linux-3.0.4/fs/open.c 2011-08-23 21:48:14.000000000 -0400
41942@@ -112,6 +112,10 @@ static long do_sys_truncate(const char _
41943 error = locks_verify_truncate(inode, NULL, length);
41944 if (!error)
41945 error = security_path_truncate(&path);
41946+
41947+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
41948+ error = -EACCES;
41949+
41950 if (!error)
41951 error = do_truncate(path.dentry, length, 0, NULL);
41952
41953@@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
41954 if (__mnt_is_readonly(path.mnt))
41955 res = -EROFS;
41956
41957+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
41958+ res = -EACCES;
41959+
41960 out_path_release:
41961 path_put(&path);
41962 out:
41963@@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user
41964 if (error)
41965 goto dput_and_out;
41966
41967+ gr_log_chdir(path.dentry, path.mnt);
41968+
41969 set_fs_pwd(current->fs, &path);
41970
41971 dput_and_out:
41972@@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
41973 goto out_putf;
41974
41975 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
41976+
41977+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
41978+ error = -EPERM;
41979+
41980+ if (!error)
41981+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
41982+
41983 if (!error)
41984 set_fs_pwd(current->fs, &file->f_path);
41985 out_putf:
41986@@ -438,7 +454,18 @@ SYSCALL_DEFINE1(chroot, const char __use
41987 if (error)
41988 goto dput_and_out;
41989
41990+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
41991+ goto dput_and_out;
41992+
41993+ if (gr_handle_chroot_caps(&path)) {
41994+ error = -ENOMEM;
41995+ goto dput_and_out;
41996+ }
41997+
41998 set_fs_root(current->fs, &path);
41999+
42000+ gr_handle_chroot_chdir(&path);
42001+
42002 error = 0;
42003 dput_and_out:
42004 path_put(&path);
42005@@ -466,12 +493,25 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
42006 err = mnt_want_write_file(file);
42007 if (err)
42008 goto out_putf;
42009+
42010 mutex_lock(&inode->i_mutex);
42011+
42012+ if (!gr_acl_handle_fchmod(dentry, file->f_vfsmnt, mode)) {
42013+ err = -EACCES;
42014+ goto out_unlock;
42015+ }
42016+
42017 err = security_path_chmod(dentry, file->f_vfsmnt, mode);
42018 if (err)
42019 goto out_unlock;
42020 if (mode == (mode_t) -1)
42021 mode = inode->i_mode;
42022+
42023+ if (gr_handle_chroot_chmod(dentry, file->f_vfsmnt, mode)) {
42024+ err = -EACCES;
42025+ goto out_unlock;
42026+ }
42027+
42028 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
42029 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
42030 err = notify_change(dentry, &newattrs);
42031@@ -499,12 +539,25 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
42032 error = mnt_want_write(path.mnt);
42033 if (error)
42034 goto dput_and_out;
42035+
42036 mutex_lock(&inode->i_mutex);
42037+
42038+ if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
42039+ error = -EACCES;
42040+ goto out_unlock;
42041+ }
42042+
42043 error = security_path_chmod(path.dentry, path.mnt, mode);
42044 if (error)
42045 goto out_unlock;
42046 if (mode == (mode_t) -1)
42047 mode = inode->i_mode;
42048+
42049+ if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
42050+ error = -EACCES;
42051+ goto out_unlock;
42052+ }
42053+
42054 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
42055 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
42056 error = notify_change(path.dentry, &newattrs);
42057@@ -528,6 +581,9 @@ static int chown_common(struct path *pat
42058 int error;
42059 struct iattr newattrs;
42060
42061+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
42062+ return -EACCES;
42063+
42064 newattrs.ia_valid = ATTR_CTIME;
42065 if (user != (uid_t) -1) {
42066 newattrs.ia_valid |= ATTR_UID;
42067@@ -998,7 +1054,10 @@ long do_sys_open(int dfd, const char __u
42068 if (!IS_ERR(tmp)) {
42069 fd = get_unused_fd_flags(flags);
42070 if (fd >= 0) {
42071- struct file *f = do_filp_open(dfd, tmp, &op, lookup);
42072+ struct file *f;
42073+ /* don't allow to be set by userland */
42074+ flags &= ~FMODE_GREXEC;
42075+ f = do_filp_open(dfd, tmp, &op, lookup);
42076 if (IS_ERR(f)) {
42077 put_unused_fd(fd);
42078 fd = PTR_ERR(f);
42079diff -urNp linux-3.0.4/fs/partitions/ldm.c linux-3.0.4/fs/partitions/ldm.c
42080--- linux-3.0.4/fs/partitions/ldm.c 2011-07-21 22:17:23.000000000 -0400
42081+++ linux-3.0.4/fs/partitions/ldm.c 2011-08-23 21:48:14.000000000 -0400
42082@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
42083 ldm_error ("A VBLK claims to have %d parts.", num);
42084 return false;
42085 }
42086+
42087 if (rec >= num) {
42088 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
42089 return false;
42090@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
42091 goto found;
42092 }
42093
42094- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
42095+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
42096 if (!f) {
42097 ldm_crit ("Out of memory.");
42098 return false;
42099diff -urNp linux-3.0.4/fs/pipe.c linux-3.0.4/fs/pipe.c
42100--- linux-3.0.4/fs/pipe.c 2011-07-21 22:17:23.000000000 -0400
42101+++ linux-3.0.4/fs/pipe.c 2011-08-23 21:48:14.000000000 -0400
42102@@ -420,9 +420,9 @@ redo:
42103 }
42104 if (bufs) /* More to do? */
42105 continue;
42106- if (!pipe->writers)
42107+ if (!atomic_read(&pipe->writers))
42108 break;
42109- if (!pipe->waiting_writers) {
42110+ if (!atomic_read(&pipe->waiting_writers)) {
42111 /* syscall merging: Usually we must not sleep
42112 * if O_NONBLOCK is set, or if we got some data.
42113 * But if a writer sleeps in kernel space, then
42114@@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const str
42115 mutex_lock(&inode->i_mutex);
42116 pipe = inode->i_pipe;
42117
42118- if (!pipe->readers) {
42119+ if (!atomic_read(&pipe->readers)) {
42120 send_sig(SIGPIPE, current, 0);
42121 ret = -EPIPE;
42122 goto out;
42123@@ -530,7 +530,7 @@ redo1:
42124 for (;;) {
42125 int bufs;
42126
42127- if (!pipe->readers) {
42128+ if (!atomic_read(&pipe->readers)) {
42129 send_sig(SIGPIPE, current, 0);
42130 if (!ret)
42131 ret = -EPIPE;
42132@@ -616,9 +616,9 @@ redo2:
42133 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
42134 do_wakeup = 0;
42135 }
42136- pipe->waiting_writers++;
42137+ atomic_inc(&pipe->waiting_writers);
42138 pipe_wait(pipe);
42139- pipe->waiting_writers--;
42140+ atomic_dec(&pipe->waiting_writers);
42141 }
42142 out:
42143 mutex_unlock(&inode->i_mutex);
42144@@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table
42145 mask = 0;
42146 if (filp->f_mode & FMODE_READ) {
42147 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
42148- if (!pipe->writers && filp->f_version != pipe->w_counter)
42149+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
42150 mask |= POLLHUP;
42151 }
42152
42153@@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table
42154 * Most Unices do not set POLLERR for FIFOs but on Linux they
42155 * behave exactly like pipes for poll().
42156 */
42157- if (!pipe->readers)
42158+ if (!atomic_read(&pipe->readers))
42159 mask |= POLLERR;
42160 }
42161
42162@@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int de
42163
42164 mutex_lock(&inode->i_mutex);
42165 pipe = inode->i_pipe;
42166- pipe->readers -= decr;
42167- pipe->writers -= decw;
42168+ atomic_sub(decr, &pipe->readers);
42169+ atomic_sub(decw, &pipe->writers);
42170
42171- if (!pipe->readers && !pipe->writers) {
42172+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
42173 free_pipe_info(inode);
42174 } else {
42175 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
42176@@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, stru
42177
42178 if (inode->i_pipe) {
42179 ret = 0;
42180- inode->i_pipe->readers++;
42181+ atomic_inc(&inode->i_pipe->readers);
42182 }
42183
42184 mutex_unlock(&inode->i_mutex);
42185@@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, str
42186
42187 if (inode->i_pipe) {
42188 ret = 0;
42189- inode->i_pipe->writers++;
42190+ atomic_inc(&inode->i_pipe->writers);
42191 }
42192
42193 mutex_unlock(&inode->i_mutex);
42194@@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, stru
42195 if (inode->i_pipe) {
42196 ret = 0;
42197 if (filp->f_mode & FMODE_READ)
42198- inode->i_pipe->readers++;
42199+ atomic_inc(&inode->i_pipe->readers);
42200 if (filp->f_mode & FMODE_WRITE)
42201- inode->i_pipe->writers++;
42202+ atomic_inc(&inode->i_pipe->writers);
42203 }
42204
42205 mutex_unlock(&inode->i_mutex);
42206@@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
42207 inode->i_pipe = NULL;
42208 }
42209
42210-static struct vfsmount *pipe_mnt __read_mostly;
42211+struct vfsmount *pipe_mnt __read_mostly;
42212
42213 /*
42214 * pipefs_dname() is called from d_path().
42215@@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(voi
42216 goto fail_iput;
42217 inode->i_pipe = pipe;
42218
42219- pipe->readers = pipe->writers = 1;
42220+ atomic_set(&pipe->readers, 1);
42221+ atomic_set(&pipe->writers, 1);
42222 inode->i_fop = &rdwr_pipefifo_fops;
42223
42224 /*
42225diff -urNp linux-3.0.4/fs/proc/array.c linux-3.0.4/fs/proc/array.c
42226--- linux-3.0.4/fs/proc/array.c 2011-07-21 22:17:23.000000000 -0400
42227+++ linux-3.0.4/fs/proc/array.c 2011-08-23 21:48:14.000000000 -0400
42228@@ -60,6 +60,7 @@
42229 #include <linux/tty.h>
42230 #include <linux/string.h>
42231 #include <linux/mman.h>
42232+#include <linux/grsecurity.h>
42233 #include <linux/proc_fs.h>
42234 #include <linux/ioport.h>
42235 #include <linux/uaccess.h>
42236@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq
42237 seq_putc(m, '\n');
42238 }
42239
42240+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42241+static inline void task_pax(struct seq_file *m, struct task_struct *p)
42242+{
42243+ if (p->mm)
42244+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
42245+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
42246+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
42247+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
42248+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
42249+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
42250+ else
42251+ seq_printf(m, "PaX:\t-----\n");
42252+}
42253+#endif
42254+
42255 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
42256 struct pid *pid, struct task_struct *task)
42257 {
42258@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m,
42259 task_cpus_allowed(m, task);
42260 cpuset_task_status_allowed(m, task);
42261 task_context_switch_counts(m, task);
42262+
42263+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42264+ task_pax(m, task);
42265+#endif
42266+
42267+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
42268+ task_grsec_rbac(m, task);
42269+#endif
42270+
42271 return 0;
42272 }
42273
42274+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42275+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
42276+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
42277+ _mm->pax_flags & MF_PAX_SEGMEXEC))
42278+#endif
42279+
42280 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
42281 struct pid *pid, struct task_struct *task, int whole)
42282 {
42283@@ -375,9 +406,11 @@ static int do_task_stat(struct seq_file
42284 cputime_t cutime, cstime, utime, stime;
42285 cputime_t cgtime, gtime;
42286 unsigned long rsslim = 0;
42287- char tcomm[sizeof(task->comm)];
42288+ char tcomm[sizeof(task->comm)] = { 0 };
42289 unsigned long flags;
42290
42291+ pax_track_stack();
42292+
42293 state = *get_task_state(task);
42294 vsize = eip = esp = 0;
42295 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
42296@@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file
42297 gtime = task->gtime;
42298 }
42299
42300+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42301+ if (PAX_RAND_FLAGS(mm)) {
42302+ eip = 0;
42303+ esp = 0;
42304+ wchan = 0;
42305+ }
42306+#endif
42307+#ifdef CONFIG_GRKERNSEC_HIDESYM
42308+ wchan = 0;
42309+ eip =0;
42310+ esp =0;
42311+#endif
42312+
42313 /* scale priority and nice values from timeslices to -20..20 */
42314 /* to make it look like a "normal" Unix priority/nice value */
42315 priority = task_prio(task);
42316@@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file
42317 vsize,
42318 mm ? get_mm_rss(mm) : 0,
42319 rsslim,
42320+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42321+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
42322+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
42323+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
42324+#else
42325 mm ? (permitted ? mm->start_code : 1) : 0,
42326 mm ? (permitted ? mm->end_code : 1) : 0,
42327 (permitted && mm) ? mm->start_stack : 0,
42328+#endif
42329 esp,
42330 eip,
42331 /* The signal information here is obsolete.
42332@@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, s
42333
42334 return 0;
42335 }
42336+
42337+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42338+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
42339+{
42340+ u32 curr_ip = 0;
42341+ unsigned long flags;
42342+
42343+ if (lock_task_sighand(task, &flags)) {
42344+ curr_ip = task->signal->curr_ip;
42345+ unlock_task_sighand(task, &flags);
42346+ }
42347+
42348+ return sprintf(buffer, "%pI4\n", &curr_ip);
42349+}
42350+#endif
42351diff -urNp linux-3.0.4/fs/proc/base.c linux-3.0.4/fs/proc/base.c
42352--- linux-3.0.4/fs/proc/base.c 2011-08-23 21:44:40.000000000 -0400
42353+++ linux-3.0.4/fs/proc/base.c 2011-08-23 21:48:14.000000000 -0400
42354@@ -107,6 +107,22 @@ struct pid_entry {
42355 union proc_op op;
42356 };
42357
42358+struct getdents_callback {
42359+ struct linux_dirent __user * current_dir;
42360+ struct linux_dirent __user * previous;
42361+ struct file * file;
42362+ int count;
42363+ int error;
42364+};
42365+
42366+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
42367+ loff_t offset, u64 ino, unsigned int d_type)
42368+{
42369+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
42370+ buf->error = -EINVAL;
42371+ return 0;
42372+}
42373+
42374 #define NOD(NAME, MODE, IOP, FOP, OP) { \
42375 .name = (NAME), \
42376 .len = sizeof(NAME) - 1, \
42377@@ -209,6 +225,9 @@ static struct mm_struct *__check_mem_per
42378 if (task == current)
42379 return mm;
42380
42381+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
42382+ return ERR_PTR(-EPERM);
42383+
42384 /*
42385 * If current is actively ptrace'ing, and would also be
42386 * permitted to freshly attach with ptrace now, permit it.
42387@@ -282,6 +301,9 @@ static int proc_pid_cmdline(struct task_
42388 if (!mm->arg_end)
42389 goto out_mm; /* Shh! No looking before we're done */
42390
42391+ if (gr_acl_handle_procpidmem(task))
42392+ goto out_mm;
42393+
42394 len = mm->arg_end - mm->arg_start;
42395
42396 if (len > PAGE_SIZE)
42397@@ -309,12 +331,28 @@ out:
42398 return res;
42399 }
42400
42401+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42402+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
42403+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
42404+ _mm->pax_flags & MF_PAX_SEGMEXEC))
42405+#endif
42406+
42407 static int proc_pid_auxv(struct task_struct *task, char *buffer)
42408 {
42409 struct mm_struct *mm = mm_for_maps(task);
42410 int res = PTR_ERR(mm);
42411 if (mm && !IS_ERR(mm)) {
42412 unsigned int nwords = 0;
42413+
42414+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42415+ /* allow if we're currently ptracing this task */
42416+ if (PAX_RAND_FLAGS(mm) &&
42417+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
42418+ mmput(mm);
42419+ return res;
42420+ }
42421+#endif
42422+
42423 do {
42424 nwords += 2;
42425 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
42426@@ -328,7 +366,7 @@ static int proc_pid_auxv(struct task_str
42427 }
42428
42429
42430-#ifdef CONFIG_KALLSYMS
42431+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42432 /*
42433 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
42434 * Returns the resolved symbol. If that fails, simply return the address.
42435@@ -367,7 +405,7 @@ static void unlock_trace(struct task_str
42436 mutex_unlock(&task->signal->cred_guard_mutex);
42437 }
42438
42439-#ifdef CONFIG_STACKTRACE
42440+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42441
42442 #define MAX_STACK_TRACE_DEPTH 64
42443
42444@@ -558,7 +596,7 @@ static int proc_pid_limits(struct task_s
42445 return count;
42446 }
42447
42448-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42449+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42450 static int proc_pid_syscall(struct task_struct *task, char *buffer)
42451 {
42452 long nr;
42453@@ -587,7 +625,7 @@ static int proc_pid_syscall(struct task_
42454 /************************************************************************/
42455
42456 /* permission checks */
42457-static int proc_fd_access_allowed(struct inode *inode)
42458+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
42459 {
42460 struct task_struct *task;
42461 int allowed = 0;
42462@@ -597,7 +635,10 @@ static int proc_fd_access_allowed(struct
42463 */
42464 task = get_proc_task(inode);
42465 if (task) {
42466- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
42467+ if (log)
42468+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
42469+ else
42470+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
42471 put_task_struct(task);
42472 }
42473 return allowed;
42474@@ -978,6 +1019,9 @@ static ssize_t environ_read(struct file
42475 if (!task)
42476 goto out_no_task;
42477
42478+ if (gr_acl_handle_procpidmem(task))
42479+ goto out;
42480+
42481 ret = -ENOMEM;
42482 page = (char *)__get_free_page(GFP_TEMPORARY);
42483 if (!page)
42484@@ -1614,7 +1658,7 @@ static void *proc_pid_follow_link(struct
42485 path_put(&nd->path);
42486
42487 /* Are we allowed to snoop on the tasks file descriptors? */
42488- if (!proc_fd_access_allowed(inode))
42489+ if (!proc_fd_access_allowed(inode,0))
42490 goto out;
42491
42492 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
42493@@ -1653,8 +1697,18 @@ static int proc_pid_readlink(struct dent
42494 struct path path;
42495
42496 /* Are we allowed to snoop on the tasks file descriptors? */
42497- if (!proc_fd_access_allowed(inode))
42498- goto out;
42499+ /* logging this is needed for learning on chromium to work properly,
42500+ but we don't want to flood the logs from 'ps' which does a readlink
42501+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
42502+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
42503+ */
42504+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
42505+ if (!proc_fd_access_allowed(inode,0))
42506+ goto out;
42507+ } else {
42508+ if (!proc_fd_access_allowed(inode,1))
42509+ goto out;
42510+ }
42511
42512 error = PROC_I(inode)->op.proc_get_link(inode, &path);
42513 if (error)
42514@@ -1719,7 +1773,11 @@ struct inode *proc_pid_make_inode(struct
42515 rcu_read_lock();
42516 cred = __task_cred(task);
42517 inode->i_uid = cred->euid;
42518+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42519+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42520+#else
42521 inode->i_gid = cred->egid;
42522+#endif
42523 rcu_read_unlock();
42524 }
42525 security_task_to_inode(task, inode);
42526@@ -1737,6 +1795,9 @@ int pid_getattr(struct vfsmount *mnt, st
42527 struct inode *inode = dentry->d_inode;
42528 struct task_struct *task;
42529 const struct cred *cred;
42530+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42531+ const struct cred *tmpcred = current_cred();
42532+#endif
42533
42534 generic_fillattr(inode, stat);
42535
42536@@ -1744,13 +1805,41 @@ int pid_getattr(struct vfsmount *mnt, st
42537 stat->uid = 0;
42538 stat->gid = 0;
42539 task = pid_task(proc_pid(inode), PIDTYPE_PID);
42540+
42541+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
42542+ rcu_read_unlock();
42543+ return -ENOENT;
42544+ }
42545+
42546 if (task) {
42547+ cred = __task_cred(task);
42548+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42549+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
42550+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42551+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
42552+#endif
42553+ ) {
42554+#endif
42555 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
42556+#ifdef CONFIG_GRKERNSEC_PROC_USER
42557+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
42558+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42559+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
42560+#endif
42561 task_dumpable(task)) {
42562- cred = __task_cred(task);
42563 stat->uid = cred->euid;
42564+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42565+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
42566+#else
42567 stat->gid = cred->egid;
42568+#endif
42569 }
42570+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42571+ } else {
42572+ rcu_read_unlock();
42573+ return -ENOENT;
42574+ }
42575+#endif
42576 }
42577 rcu_read_unlock();
42578 return 0;
42579@@ -1787,11 +1876,20 @@ int pid_revalidate(struct dentry *dentry
42580
42581 if (task) {
42582 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
42583+#ifdef CONFIG_GRKERNSEC_PROC_USER
42584+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
42585+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42586+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
42587+#endif
42588 task_dumpable(task)) {
42589 rcu_read_lock();
42590 cred = __task_cred(task);
42591 inode->i_uid = cred->euid;
42592+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42593+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42594+#else
42595 inode->i_gid = cred->egid;
42596+#endif
42597 rcu_read_unlock();
42598 } else {
42599 inode->i_uid = 0;
42600@@ -1909,7 +2007,8 @@ static int proc_fd_info(struct inode *in
42601 int fd = proc_fd(inode);
42602
42603 if (task) {
42604- files = get_files_struct(task);
42605+ if (!gr_acl_handle_procpidmem(task))
42606+ files = get_files_struct(task);
42607 put_task_struct(task);
42608 }
42609 if (files) {
42610@@ -2169,11 +2268,21 @@ static const struct file_operations proc
42611 */
42612 static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
42613 {
42614+ struct task_struct *task;
42615 int rv = generic_permission(inode, mask, flags, NULL);
42616- if (rv == 0)
42617- return 0;
42618+
42619 if (task_pid(current) == proc_pid(inode))
42620 rv = 0;
42621+
42622+ task = get_proc_task(inode);
42623+ if (task == NULL)
42624+ return rv;
42625+
42626+ if (gr_acl_handle_procpidmem(task))
42627+ rv = -EACCES;
42628+
42629+ put_task_struct(task);
42630+
42631 return rv;
42632 }
42633
42634@@ -2283,6 +2392,9 @@ static struct dentry *proc_pident_lookup
42635 if (!task)
42636 goto out_no_task;
42637
42638+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42639+ goto out;
42640+
42641 /*
42642 * Yes, it does not scale. And it should not. Don't add
42643 * new entries into /proc/<tgid>/ without very good reasons.
42644@@ -2327,6 +2439,9 @@ static int proc_pident_readdir(struct fi
42645 if (!task)
42646 goto out_no_task;
42647
42648+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42649+ goto out;
42650+
42651 ret = 0;
42652 i = filp->f_pos;
42653 switch (i) {
42654@@ -2597,7 +2712,7 @@ static void *proc_self_follow_link(struc
42655 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
42656 void *cookie)
42657 {
42658- char *s = nd_get_link(nd);
42659+ const char *s = nd_get_link(nd);
42660 if (!IS_ERR(s))
42661 __putname(s);
42662 }
42663@@ -2795,7 +2910,7 @@ static const struct pid_entry tgid_base_
42664 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
42665 #endif
42666 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
42667-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42668+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42669 INF("syscall", S_IRUGO, proc_pid_syscall),
42670 #endif
42671 INF("cmdline", S_IRUGO, proc_pid_cmdline),
42672@@ -2820,10 +2935,10 @@ static const struct pid_entry tgid_base_
42673 #ifdef CONFIG_SECURITY
42674 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
42675 #endif
42676-#ifdef CONFIG_KALLSYMS
42677+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42678 INF("wchan", S_IRUGO, proc_pid_wchan),
42679 #endif
42680-#ifdef CONFIG_STACKTRACE
42681+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42682 ONE("stack", S_IRUGO, proc_pid_stack),
42683 #endif
42684 #ifdef CONFIG_SCHEDSTATS
42685@@ -2857,6 +2972,9 @@ static const struct pid_entry tgid_base_
42686 #ifdef CONFIG_HARDWALL
42687 INF("hardwall", S_IRUGO, proc_pid_hardwall),
42688 #endif
42689+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42690+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
42691+#endif
42692 };
42693
42694 static int proc_tgid_base_readdir(struct file * filp,
42695@@ -2982,7 +3100,14 @@ static struct dentry *proc_pid_instantia
42696 if (!inode)
42697 goto out;
42698
42699+#ifdef CONFIG_GRKERNSEC_PROC_USER
42700+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
42701+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42702+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42703+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
42704+#else
42705 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
42706+#endif
42707 inode->i_op = &proc_tgid_base_inode_operations;
42708 inode->i_fop = &proc_tgid_base_operations;
42709 inode->i_flags|=S_IMMUTABLE;
42710@@ -3024,7 +3149,11 @@ struct dentry *proc_pid_lookup(struct in
42711 if (!task)
42712 goto out;
42713
42714+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42715+ goto out_put_task;
42716+
42717 result = proc_pid_instantiate(dir, dentry, task, NULL);
42718+out_put_task:
42719 put_task_struct(task);
42720 out:
42721 return result;
42722@@ -3089,6 +3218,11 @@ int proc_pid_readdir(struct file * filp,
42723 {
42724 unsigned int nr;
42725 struct task_struct *reaper;
42726+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42727+ const struct cred *tmpcred = current_cred();
42728+ const struct cred *itercred;
42729+#endif
42730+ filldir_t __filldir = filldir;
42731 struct tgid_iter iter;
42732 struct pid_namespace *ns;
42733
42734@@ -3112,8 +3246,27 @@ int proc_pid_readdir(struct file * filp,
42735 for (iter = next_tgid(ns, iter);
42736 iter.task;
42737 iter.tgid += 1, iter = next_tgid(ns, iter)) {
42738+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42739+ rcu_read_lock();
42740+ itercred = __task_cred(iter.task);
42741+#endif
42742+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
42743+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42744+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
42745+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42746+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
42747+#endif
42748+ )
42749+#endif
42750+ )
42751+ __filldir = &gr_fake_filldir;
42752+ else
42753+ __filldir = filldir;
42754+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42755+ rcu_read_unlock();
42756+#endif
42757 filp->f_pos = iter.tgid + TGID_OFFSET;
42758- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
42759+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
42760 put_task_struct(iter.task);
42761 goto out;
42762 }
42763@@ -3141,7 +3294,7 @@ static const struct pid_entry tid_base_s
42764 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
42765 #endif
42766 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
42767-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42768+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42769 INF("syscall", S_IRUGO, proc_pid_syscall),
42770 #endif
42771 INF("cmdline", S_IRUGO, proc_pid_cmdline),
42772@@ -3165,10 +3318,10 @@ static const struct pid_entry tid_base_s
42773 #ifdef CONFIG_SECURITY
42774 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
42775 #endif
42776-#ifdef CONFIG_KALLSYMS
42777+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42778 INF("wchan", S_IRUGO, proc_pid_wchan),
42779 #endif
42780-#ifdef CONFIG_STACKTRACE
42781+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42782 ONE("stack", S_IRUGO, proc_pid_stack),
42783 #endif
42784 #ifdef CONFIG_SCHEDSTATS
42785diff -urNp linux-3.0.4/fs/proc/cmdline.c linux-3.0.4/fs/proc/cmdline.c
42786--- linux-3.0.4/fs/proc/cmdline.c 2011-07-21 22:17:23.000000000 -0400
42787+++ linux-3.0.4/fs/proc/cmdline.c 2011-08-23 21:48:14.000000000 -0400
42788@@ -23,7 +23,11 @@ static const struct file_operations cmdl
42789
42790 static int __init proc_cmdline_init(void)
42791 {
42792+#ifdef CONFIG_GRKERNSEC_PROC_ADD
42793+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
42794+#else
42795 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
42796+#endif
42797 return 0;
42798 }
42799 module_init(proc_cmdline_init);
42800diff -urNp linux-3.0.4/fs/proc/devices.c linux-3.0.4/fs/proc/devices.c
42801--- linux-3.0.4/fs/proc/devices.c 2011-07-21 22:17:23.000000000 -0400
42802+++ linux-3.0.4/fs/proc/devices.c 2011-08-23 21:48:14.000000000 -0400
42803@@ -64,7 +64,11 @@ static const struct file_operations proc
42804
42805 static int __init proc_devices_init(void)
42806 {
42807+#ifdef CONFIG_GRKERNSEC_PROC_ADD
42808+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
42809+#else
42810 proc_create("devices", 0, NULL, &proc_devinfo_operations);
42811+#endif
42812 return 0;
42813 }
42814 module_init(proc_devices_init);
42815diff -urNp linux-3.0.4/fs/proc/inode.c linux-3.0.4/fs/proc/inode.c
42816--- linux-3.0.4/fs/proc/inode.c 2011-07-21 22:17:23.000000000 -0400
42817+++ linux-3.0.4/fs/proc/inode.c 2011-08-23 21:48:14.000000000 -0400
42818@@ -440,7 +440,11 @@ struct inode *proc_get_inode(struct supe
42819 if (de->mode) {
42820 inode->i_mode = de->mode;
42821 inode->i_uid = de->uid;
42822+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42823+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42824+#else
42825 inode->i_gid = de->gid;
42826+#endif
42827 }
42828 if (de->size)
42829 inode->i_size = de->size;
42830diff -urNp linux-3.0.4/fs/proc/internal.h linux-3.0.4/fs/proc/internal.h
42831--- linux-3.0.4/fs/proc/internal.h 2011-07-21 22:17:23.000000000 -0400
42832+++ linux-3.0.4/fs/proc/internal.h 2011-08-23 21:48:14.000000000 -0400
42833@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
42834 struct pid *pid, struct task_struct *task);
42835 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
42836 struct pid *pid, struct task_struct *task);
42837+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42838+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
42839+#endif
42840 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
42841
42842 extern const struct file_operations proc_maps_operations;
42843diff -urNp linux-3.0.4/fs/proc/Kconfig linux-3.0.4/fs/proc/Kconfig
42844--- linux-3.0.4/fs/proc/Kconfig 2011-07-21 22:17:23.000000000 -0400
42845+++ linux-3.0.4/fs/proc/Kconfig 2011-08-23 21:48:14.000000000 -0400
42846@@ -30,12 +30,12 @@ config PROC_FS
42847
42848 config PROC_KCORE
42849 bool "/proc/kcore support" if !ARM
42850- depends on PROC_FS && MMU
42851+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
42852
42853 config PROC_VMCORE
42854 bool "/proc/vmcore support"
42855- depends on PROC_FS && CRASH_DUMP
42856- default y
42857+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
42858+ default n
42859 help
42860 Exports the dump image of crashed kernel in ELF format.
42861
42862@@ -59,8 +59,8 @@ config PROC_SYSCTL
42863 limited in memory.
42864
42865 config PROC_PAGE_MONITOR
42866- default y
42867- depends on PROC_FS && MMU
42868+ default n
42869+ depends on PROC_FS && MMU && !GRKERNSEC
42870 bool "Enable /proc page monitoring" if EXPERT
42871 help
42872 Various /proc files exist to monitor process memory utilization:
42873diff -urNp linux-3.0.4/fs/proc/kcore.c linux-3.0.4/fs/proc/kcore.c
42874--- linux-3.0.4/fs/proc/kcore.c 2011-07-21 22:17:23.000000000 -0400
42875+++ linux-3.0.4/fs/proc/kcore.c 2011-08-23 21:48:14.000000000 -0400
42876@@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bu
42877 off_t offset = 0;
42878 struct kcore_list *m;
42879
42880+ pax_track_stack();
42881+
42882 /* setup ELF header */
42883 elf = (struct elfhdr *) bufp;
42884 bufp += sizeof(struct elfhdr);
42885@@ -478,9 +480,10 @@ read_kcore(struct file *file, char __use
42886 * the addresses in the elf_phdr on our list.
42887 */
42888 start = kc_offset_to_vaddr(*fpos - elf_buflen);
42889- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
42890+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
42891+ if (tsz > buflen)
42892 tsz = buflen;
42893-
42894+
42895 while (buflen) {
42896 struct kcore_list *m;
42897
42898@@ -509,20 +512,23 @@ read_kcore(struct file *file, char __use
42899 kfree(elf_buf);
42900 } else {
42901 if (kern_addr_valid(start)) {
42902- unsigned long n;
42903+ char *elf_buf;
42904+ mm_segment_t oldfs;
42905
42906- n = copy_to_user(buffer, (char *)start, tsz);
42907- /*
42908- * We cannot distingush between fault on source
42909- * and fault on destination. When this happens
42910- * we clear too and hope it will trigger the
42911- * EFAULT again.
42912- */
42913- if (n) {
42914- if (clear_user(buffer + tsz - n,
42915- n))
42916+ elf_buf = kmalloc(tsz, GFP_KERNEL);
42917+ if (!elf_buf)
42918+ return -ENOMEM;
42919+ oldfs = get_fs();
42920+ set_fs(KERNEL_DS);
42921+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
42922+ set_fs(oldfs);
42923+ if (copy_to_user(buffer, elf_buf, tsz)) {
42924+ kfree(elf_buf);
42925 return -EFAULT;
42926+ }
42927 }
42928+ set_fs(oldfs);
42929+ kfree(elf_buf);
42930 } else {
42931 if (clear_user(buffer, tsz))
42932 return -EFAULT;
42933@@ -542,6 +548,9 @@ read_kcore(struct file *file, char __use
42934
42935 static int open_kcore(struct inode *inode, struct file *filp)
42936 {
42937+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
42938+ return -EPERM;
42939+#endif
42940 if (!capable(CAP_SYS_RAWIO))
42941 return -EPERM;
42942 if (kcore_need_update)
42943diff -urNp linux-3.0.4/fs/proc/meminfo.c linux-3.0.4/fs/proc/meminfo.c
42944--- linux-3.0.4/fs/proc/meminfo.c 2011-07-21 22:17:23.000000000 -0400
42945+++ linux-3.0.4/fs/proc/meminfo.c 2011-08-23 21:48:14.000000000 -0400
42946@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
42947 unsigned long pages[NR_LRU_LISTS];
42948 int lru;
42949
42950+ pax_track_stack();
42951+
42952 /*
42953 * display in kilobytes.
42954 */
42955@@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_
42956 vmi.used >> 10,
42957 vmi.largest_chunk >> 10
42958 #ifdef CONFIG_MEMORY_FAILURE
42959- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
42960+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
42961 #endif
42962 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
42963 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
42964diff -urNp linux-3.0.4/fs/proc/nommu.c linux-3.0.4/fs/proc/nommu.c
42965--- linux-3.0.4/fs/proc/nommu.c 2011-07-21 22:17:23.000000000 -0400
42966+++ linux-3.0.4/fs/proc/nommu.c 2011-08-23 21:47:56.000000000 -0400
42967@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
42968 if (len < 1)
42969 len = 1;
42970 seq_printf(m, "%*c", len, ' ');
42971- seq_path(m, &file->f_path, "");
42972+ seq_path(m, &file->f_path, "\n\\");
42973 }
42974
42975 seq_putc(m, '\n');
42976diff -urNp linux-3.0.4/fs/proc/proc_net.c linux-3.0.4/fs/proc/proc_net.c
42977--- linux-3.0.4/fs/proc/proc_net.c 2011-07-21 22:17:23.000000000 -0400
42978+++ linux-3.0.4/fs/proc/proc_net.c 2011-08-23 21:48:14.000000000 -0400
42979@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str
42980 struct task_struct *task;
42981 struct nsproxy *ns;
42982 struct net *net = NULL;
42983+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42984+ const struct cred *cred = current_cred();
42985+#endif
42986+
42987+#ifdef CONFIG_GRKERNSEC_PROC_USER
42988+ if (cred->fsuid)
42989+ return net;
42990+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42991+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
42992+ return net;
42993+#endif
42994
42995 rcu_read_lock();
42996 task = pid_task(proc_pid(dir), PIDTYPE_PID);
42997diff -urNp linux-3.0.4/fs/proc/proc_sysctl.c linux-3.0.4/fs/proc/proc_sysctl.c
42998--- linux-3.0.4/fs/proc/proc_sysctl.c 2011-07-21 22:17:23.000000000 -0400
42999+++ linux-3.0.4/fs/proc/proc_sysctl.c 2011-08-23 21:48:14.000000000 -0400
43000@@ -8,6 +8,8 @@
43001 #include <linux/namei.h>
43002 #include "internal.h"
43003
43004+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
43005+
43006 static const struct dentry_operations proc_sys_dentry_operations;
43007 static const struct file_operations proc_sys_file_operations;
43008 static const struct inode_operations proc_sys_inode_operations;
43009@@ -111,6 +113,9 @@ static struct dentry *proc_sys_lookup(st
43010 if (!p)
43011 goto out;
43012
43013+ if (gr_handle_sysctl(p, MAY_EXEC))
43014+ goto out;
43015+
43016 err = ERR_PTR(-ENOMEM);
43017 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
43018 if (h)
43019@@ -230,6 +235,9 @@ static int scan(struct ctl_table_header
43020 if (*pos < file->f_pos)
43021 continue;
43022
43023+ if (gr_handle_sysctl(table, 0))
43024+ continue;
43025+
43026 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
43027 if (res)
43028 return res;
43029@@ -355,6 +363,9 @@ static int proc_sys_getattr(struct vfsmo
43030 if (IS_ERR(head))
43031 return PTR_ERR(head);
43032
43033+ if (table && gr_handle_sysctl(table, MAY_EXEC))
43034+ return -ENOENT;
43035+
43036 generic_fillattr(inode, stat);
43037 if (table)
43038 stat->mode = (stat->mode & S_IFMT) | table->mode;
43039diff -urNp linux-3.0.4/fs/proc/root.c linux-3.0.4/fs/proc/root.c
43040--- linux-3.0.4/fs/proc/root.c 2011-07-21 22:17:23.000000000 -0400
43041+++ linux-3.0.4/fs/proc/root.c 2011-08-23 21:48:14.000000000 -0400
43042@@ -123,7 +123,15 @@ void __init proc_root_init(void)
43043 #ifdef CONFIG_PROC_DEVICETREE
43044 proc_device_tree_init();
43045 #endif
43046+#ifdef CONFIG_GRKERNSEC_PROC_ADD
43047+#ifdef CONFIG_GRKERNSEC_PROC_USER
43048+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
43049+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43050+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
43051+#endif
43052+#else
43053 proc_mkdir("bus", NULL);
43054+#endif
43055 proc_sys_init();
43056 }
43057
43058diff -urNp linux-3.0.4/fs/proc/task_mmu.c linux-3.0.4/fs/proc/task_mmu.c
43059--- linux-3.0.4/fs/proc/task_mmu.c 2011-07-21 22:17:23.000000000 -0400
43060+++ linux-3.0.4/fs/proc/task_mmu.c 2011-08-23 21:48:14.000000000 -0400
43061@@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct
43062 "VmExe:\t%8lu kB\n"
43063 "VmLib:\t%8lu kB\n"
43064 "VmPTE:\t%8lu kB\n"
43065- "VmSwap:\t%8lu kB\n",
43066- hiwater_vm << (PAGE_SHIFT-10),
43067+ "VmSwap:\t%8lu kB\n"
43068+
43069+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
43070+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
43071+#endif
43072+
43073+ ,hiwater_vm << (PAGE_SHIFT-10),
43074 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
43075 mm->locked_vm << (PAGE_SHIFT-10),
43076 hiwater_rss << (PAGE_SHIFT-10),
43077@@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct
43078 data << (PAGE_SHIFT-10),
43079 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
43080 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
43081- swap << (PAGE_SHIFT-10));
43082+ swap << (PAGE_SHIFT-10)
43083+
43084+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
43085+ , mm->context.user_cs_base, mm->context.user_cs_limit
43086+#endif
43087+
43088+ );
43089 }
43090
43091 unsigned long task_vsize(struct mm_struct *mm)
43092@@ -207,6 +218,12 @@ static int do_maps_open(struct inode *in
43093 return ret;
43094 }
43095
43096+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43097+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
43098+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
43099+ _mm->pax_flags & MF_PAX_SEGMEXEC))
43100+#endif
43101+
43102 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
43103 {
43104 struct mm_struct *mm = vma->vm_mm;
43105@@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file
43106 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
43107 }
43108
43109- /* We don't show the stack guard page in /proc/maps */
43110+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43111+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
43112+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
43113+#else
43114 start = vma->vm_start;
43115- if (stack_guard_page_start(vma, start))
43116- start += PAGE_SIZE;
43117 end = vma->vm_end;
43118- if (stack_guard_page_end(vma, end))
43119- end -= PAGE_SIZE;
43120+#endif
43121
43122 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
43123 start,
43124@@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file
43125 flags & VM_WRITE ? 'w' : '-',
43126 flags & VM_EXEC ? 'x' : '-',
43127 flags & VM_MAYSHARE ? 's' : 'p',
43128+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43129+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
43130+#else
43131 pgoff,
43132+#endif
43133 MAJOR(dev), MINOR(dev), ino, &len);
43134
43135 /*
43136@@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file
43137 */
43138 if (file) {
43139 pad_len_spaces(m, len);
43140- seq_path(m, &file->f_path, "\n");
43141+ seq_path(m, &file->f_path, "\n\\");
43142 } else {
43143 const char *name = arch_vma_name(vma);
43144 if (!name) {
43145@@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file
43146 if (vma->vm_start <= mm->brk &&
43147 vma->vm_end >= mm->start_brk) {
43148 name = "[heap]";
43149- } else if (vma->vm_start <= mm->start_stack &&
43150- vma->vm_end >= mm->start_stack) {
43151+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
43152+ (vma->vm_start <= mm->start_stack &&
43153+ vma->vm_end >= mm->start_stack)) {
43154 name = "[stack]";
43155 }
43156 } else {
43157@@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m,
43158 };
43159
43160 memset(&mss, 0, sizeof mss);
43161- mss.vma = vma;
43162- /* mmap_sem is held in m_start */
43163- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
43164- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
43165-
43166+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43167+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
43168+#endif
43169+ mss.vma = vma;
43170+ /* mmap_sem is held in m_start */
43171+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
43172+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
43173+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43174+ }
43175+#endif
43176 show_map_vma(m, vma);
43177
43178 seq_printf(m,
43179@@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m,
43180 "KernelPageSize: %8lu kB\n"
43181 "MMUPageSize: %8lu kB\n"
43182 "Locked: %8lu kB\n",
43183+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43184+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
43185+#else
43186 (vma->vm_end - vma->vm_start) >> 10,
43187+#endif
43188 mss.resident >> 10,
43189 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
43190 mss.shared_clean >> 10,
43191@@ -1001,7 +1032,7 @@ static int show_numa_map(struct seq_file
43192
43193 if (file) {
43194 seq_printf(m, " file=");
43195- seq_path(m, &file->f_path, "\n\t= ");
43196+ seq_path(m, &file->f_path, "\n\t\\= ");
43197 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
43198 seq_printf(m, " heap");
43199 } else if (vma->vm_start <= mm->start_stack &&
43200diff -urNp linux-3.0.4/fs/proc/task_nommu.c linux-3.0.4/fs/proc/task_nommu.c
43201--- linux-3.0.4/fs/proc/task_nommu.c 2011-07-21 22:17:23.000000000 -0400
43202+++ linux-3.0.4/fs/proc/task_nommu.c 2011-08-23 21:47:56.000000000 -0400
43203@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
43204 else
43205 bytes += kobjsize(mm);
43206
43207- if (current->fs && current->fs->users > 1)
43208+ if (current->fs && atomic_read(&current->fs->users) > 1)
43209 sbytes += kobjsize(current->fs);
43210 else
43211 bytes += kobjsize(current->fs);
43212@@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_fil
43213
43214 if (file) {
43215 pad_len_spaces(m, len);
43216- seq_path(m, &file->f_path, "");
43217+ seq_path(m, &file->f_path, "\n\\");
43218 } else if (mm) {
43219 if (vma->vm_start <= mm->start_stack &&
43220 vma->vm_end >= mm->start_stack) {
43221diff -urNp linux-3.0.4/fs/quota/netlink.c linux-3.0.4/fs/quota/netlink.c
43222--- linux-3.0.4/fs/quota/netlink.c 2011-07-21 22:17:23.000000000 -0400
43223+++ linux-3.0.4/fs/quota/netlink.c 2011-08-23 21:47:56.000000000 -0400
43224@@ -33,7 +33,7 @@ static struct genl_family quota_genl_fam
43225 void quota_send_warning(short type, unsigned int id, dev_t dev,
43226 const char warntype)
43227 {
43228- static atomic_t seq;
43229+ static atomic_unchecked_t seq;
43230 struct sk_buff *skb;
43231 void *msg_head;
43232 int ret;
43233@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsi
43234 "VFS: Not enough memory to send quota warning.\n");
43235 return;
43236 }
43237- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
43238+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
43239 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
43240 if (!msg_head) {
43241 printk(KERN_ERR
43242diff -urNp linux-3.0.4/fs/readdir.c linux-3.0.4/fs/readdir.c
43243--- linux-3.0.4/fs/readdir.c 2011-07-21 22:17:23.000000000 -0400
43244+++ linux-3.0.4/fs/readdir.c 2011-08-23 21:48:14.000000000 -0400
43245@@ -17,6 +17,7 @@
43246 #include <linux/security.h>
43247 #include <linux/syscalls.h>
43248 #include <linux/unistd.h>
43249+#include <linux/namei.h>
43250
43251 #include <asm/uaccess.h>
43252
43253@@ -67,6 +68,7 @@ struct old_linux_dirent {
43254
43255 struct readdir_callback {
43256 struct old_linux_dirent __user * dirent;
43257+ struct file * file;
43258 int result;
43259 };
43260
43261@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
43262 buf->result = -EOVERFLOW;
43263 return -EOVERFLOW;
43264 }
43265+
43266+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43267+ return 0;
43268+
43269 buf->result++;
43270 dirent = buf->dirent;
43271 if (!access_ok(VERIFY_WRITE, dirent,
43272@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
43273
43274 buf.result = 0;
43275 buf.dirent = dirent;
43276+ buf.file = file;
43277
43278 error = vfs_readdir(file, fillonedir, &buf);
43279 if (buf.result)
43280@@ -142,6 +149,7 @@ struct linux_dirent {
43281 struct getdents_callback {
43282 struct linux_dirent __user * current_dir;
43283 struct linux_dirent __user * previous;
43284+ struct file * file;
43285 int count;
43286 int error;
43287 };
43288@@ -163,6 +171,10 @@ static int filldir(void * __buf, const c
43289 buf->error = -EOVERFLOW;
43290 return -EOVERFLOW;
43291 }
43292+
43293+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43294+ return 0;
43295+
43296 dirent = buf->previous;
43297 if (dirent) {
43298 if (__put_user(offset, &dirent->d_off))
43299@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
43300 buf.previous = NULL;
43301 buf.count = count;
43302 buf.error = 0;
43303+ buf.file = file;
43304
43305 error = vfs_readdir(file, filldir, &buf);
43306 if (error >= 0)
43307@@ -229,6 +242,7 @@ out:
43308 struct getdents_callback64 {
43309 struct linux_dirent64 __user * current_dir;
43310 struct linux_dirent64 __user * previous;
43311+ struct file *file;
43312 int count;
43313 int error;
43314 };
43315@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const
43316 buf->error = -EINVAL; /* only used if we fail.. */
43317 if (reclen > buf->count)
43318 return -EINVAL;
43319+
43320+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43321+ return 0;
43322+
43323 dirent = buf->previous;
43324 if (dirent) {
43325 if (__put_user(offset, &dirent->d_off))
43326@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
43327
43328 buf.current_dir = dirent;
43329 buf.previous = NULL;
43330+ buf.file = file;
43331 buf.count = count;
43332 buf.error = 0;
43333
43334diff -urNp linux-3.0.4/fs/reiserfs/dir.c linux-3.0.4/fs/reiserfs/dir.c
43335--- linux-3.0.4/fs/reiserfs/dir.c 2011-07-21 22:17:23.000000000 -0400
43336+++ linux-3.0.4/fs/reiserfs/dir.c 2011-08-23 21:48:14.000000000 -0400
43337@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
43338 struct reiserfs_dir_entry de;
43339 int ret = 0;
43340
43341+ pax_track_stack();
43342+
43343 reiserfs_write_lock(inode->i_sb);
43344
43345 reiserfs_check_lock_depth(inode->i_sb, "readdir");
43346diff -urNp linux-3.0.4/fs/reiserfs/do_balan.c linux-3.0.4/fs/reiserfs/do_balan.c
43347--- linux-3.0.4/fs/reiserfs/do_balan.c 2011-07-21 22:17:23.000000000 -0400
43348+++ linux-3.0.4/fs/reiserfs/do_balan.c 2011-08-23 21:47:56.000000000 -0400
43349@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
43350 return;
43351 }
43352
43353- atomic_inc(&(fs_generation(tb->tb_sb)));
43354+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
43355 do_balance_starts(tb);
43356
43357 /* balance leaf returns 0 except if combining L R and S into
43358diff -urNp linux-3.0.4/fs/reiserfs/journal.c linux-3.0.4/fs/reiserfs/journal.c
43359--- linux-3.0.4/fs/reiserfs/journal.c 2011-07-21 22:17:23.000000000 -0400
43360+++ linux-3.0.4/fs/reiserfs/journal.c 2011-08-23 21:48:14.000000000 -0400
43361@@ -2299,6 +2299,8 @@ static struct buffer_head *reiserfs_brea
43362 struct buffer_head *bh;
43363 int i, j;
43364
43365+ pax_track_stack();
43366+
43367 bh = __getblk(dev, block, bufsize);
43368 if (buffer_uptodate(bh))
43369 return (bh);
43370diff -urNp linux-3.0.4/fs/reiserfs/namei.c linux-3.0.4/fs/reiserfs/namei.c
43371--- linux-3.0.4/fs/reiserfs/namei.c 2011-07-21 22:17:23.000000000 -0400
43372+++ linux-3.0.4/fs/reiserfs/namei.c 2011-08-23 21:48:14.000000000 -0400
43373@@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode
43374 unsigned long savelink = 1;
43375 struct timespec ctime;
43376
43377+ pax_track_stack();
43378+
43379 /* three balancings: (1) old name removal, (2) new name insertion
43380 and (3) maybe "save" link insertion
43381 stat data updates: (1) old directory,
43382diff -urNp linux-3.0.4/fs/reiserfs/procfs.c linux-3.0.4/fs/reiserfs/procfs.c
43383--- linux-3.0.4/fs/reiserfs/procfs.c 2011-07-21 22:17:23.000000000 -0400
43384+++ linux-3.0.4/fs/reiserfs/procfs.c 2011-08-23 21:48:14.000000000 -0400
43385@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m
43386 "SMALL_TAILS " : "NO_TAILS ",
43387 replay_only(sb) ? "REPLAY_ONLY " : "",
43388 convert_reiserfs(sb) ? "CONV " : "",
43389- atomic_read(&r->s_generation_counter),
43390+ atomic_read_unchecked(&r->s_generation_counter),
43391 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
43392 SF(s_do_balance), SF(s_unneeded_left_neighbor),
43393 SF(s_good_search_by_key_reada), SF(s_bmaps),
43394@@ -299,6 +299,8 @@ static int show_journal(struct seq_file
43395 struct journal_params *jp = &rs->s_v1.s_journal;
43396 char b[BDEVNAME_SIZE];
43397
43398+ pax_track_stack();
43399+
43400 seq_printf(m, /* on-disk fields */
43401 "jp_journal_1st_block: \t%i\n"
43402 "jp_journal_dev: \t%s[%x]\n"
43403diff -urNp linux-3.0.4/fs/reiserfs/stree.c linux-3.0.4/fs/reiserfs/stree.c
43404--- linux-3.0.4/fs/reiserfs/stree.c 2011-07-21 22:17:23.000000000 -0400
43405+++ linux-3.0.4/fs/reiserfs/stree.c 2011-08-23 21:48:14.000000000 -0400
43406@@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs
43407 int iter = 0;
43408 #endif
43409
43410+ pax_track_stack();
43411+
43412 BUG_ON(!th->t_trans_id);
43413
43414 init_tb_struct(th, &s_del_balance, sb, path,
43415@@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct r
43416 int retval;
43417 int quota_cut_bytes = 0;
43418
43419+ pax_track_stack();
43420+
43421 BUG_ON(!th->t_trans_id);
43422
43423 le_key2cpu_key(&cpu_key, key);
43424@@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiser
43425 int quota_cut_bytes;
43426 loff_t tail_pos = 0;
43427
43428+ pax_track_stack();
43429+
43430 BUG_ON(!th->t_trans_id);
43431
43432 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
43433@@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reis
43434 int retval;
43435 int fs_gen;
43436
43437+ pax_track_stack();
43438+
43439 BUG_ON(!th->t_trans_id);
43440
43441 fs_gen = get_generation(inode->i_sb);
43442@@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs
43443 int fs_gen = 0;
43444 int quota_bytes = 0;
43445
43446+ pax_track_stack();
43447+
43448 BUG_ON(!th->t_trans_id);
43449
43450 if (inode) { /* Do we count quotas for item? */
43451diff -urNp linux-3.0.4/fs/reiserfs/super.c linux-3.0.4/fs/reiserfs/super.c
43452--- linux-3.0.4/fs/reiserfs/super.c 2011-07-21 22:17:23.000000000 -0400
43453+++ linux-3.0.4/fs/reiserfs/super.c 2011-08-23 21:48:14.000000000 -0400
43454@@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct
43455 {.option_name = NULL}
43456 };
43457
43458+ pax_track_stack();
43459+
43460 *blocks = 0;
43461 if (!options || !*options)
43462 /* use default configuration: create tails, journaling on, no
43463diff -urNp linux-3.0.4/fs/select.c linux-3.0.4/fs/select.c
43464--- linux-3.0.4/fs/select.c 2011-07-21 22:17:23.000000000 -0400
43465+++ linux-3.0.4/fs/select.c 2011-08-23 21:48:14.000000000 -0400
43466@@ -20,6 +20,7 @@
43467 #include <linux/module.h>
43468 #include <linux/slab.h>
43469 #include <linux/poll.h>
43470+#include <linux/security.h>
43471 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
43472 #include <linux/file.h>
43473 #include <linux/fdtable.h>
43474@@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, s
43475 int retval, i, timed_out = 0;
43476 unsigned long slack = 0;
43477
43478+ pax_track_stack();
43479+
43480 rcu_read_lock();
43481 retval = max_select_fd(n, fds);
43482 rcu_read_unlock();
43483@@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user
43484 /* Allocate small arguments on the stack to save memory and be faster */
43485 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
43486
43487+ pax_track_stack();
43488+
43489 ret = -EINVAL;
43490 if (n < 0)
43491 goto out_nofds;
43492@@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *uf
43493 struct poll_list *walk = head;
43494 unsigned long todo = nfds;
43495
43496+ pax_track_stack();
43497+
43498+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
43499 if (nfds > rlimit(RLIMIT_NOFILE))
43500 return -EINVAL;
43501
43502diff -urNp linux-3.0.4/fs/seq_file.c linux-3.0.4/fs/seq_file.c
43503--- linux-3.0.4/fs/seq_file.c 2011-07-21 22:17:23.000000000 -0400
43504+++ linux-3.0.4/fs/seq_file.c 2011-08-23 21:47:56.000000000 -0400
43505@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
43506 return 0;
43507 }
43508 if (!m->buf) {
43509- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
43510+ m->size = PAGE_SIZE;
43511+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
43512 if (!m->buf)
43513 return -ENOMEM;
43514 }
43515@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
43516 Eoverflow:
43517 m->op->stop(m, p);
43518 kfree(m->buf);
43519- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
43520+ m->size <<= 1;
43521+ m->buf = kmalloc(m->size, GFP_KERNEL);
43522 return !m->buf ? -ENOMEM : -EAGAIN;
43523 }
43524
43525@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
43526 m->version = file->f_version;
43527 /* grab buffer if we didn't have one */
43528 if (!m->buf) {
43529- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
43530+ m->size = PAGE_SIZE;
43531+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
43532 if (!m->buf)
43533 goto Enomem;
43534 }
43535@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
43536 goto Fill;
43537 m->op->stop(m, p);
43538 kfree(m->buf);
43539- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
43540+ m->size <<= 1;
43541+ m->buf = kmalloc(m->size, GFP_KERNEL);
43542 if (!m->buf)
43543 goto Enomem;
43544 m->count = 0;
43545@@ -549,7 +553,7 @@ static void single_stop(struct seq_file
43546 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
43547 void *data)
43548 {
43549- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
43550+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
43551 int res = -ENOMEM;
43552
43553 if (op) {
43554diff -urNp linux-3.0.4/fs/splice.c linux-3.0.4/fs/splice.c
43555--- linux-3.0.4/fs/splice.c 2011-07-21 22:17:23.000000000 -0400
43556+++ linux-3.0.4/fs/splice.c 2011-08-23 21:48:14.000000000 -0400
43557@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode
43558 pipe_lock(pipe);
43559
43560 for (;;) {
43561- if (!pipe->readers) {
43562+ if (!atomic_read(&pipe->readers)) {
43563 send_sig(SIGPIPE, current, 0);
43564 if (!ret)
43565 ret = -EPIPE;
43566@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode
43567 do_wakeup = 0;
43568 }
43569
43570- pipe->waiting_writers++;
43571+ atomic_inc(&pipe->waiting_writers);
43572 pipe_wait(pipe);
43573- pipe->waiting_writers--;
43574+ atomic_dec(&pipe->waiting_writers);
43575 }
43576
43577 pipe_unlock(pipe);
43578@@ -320,6 +320,8 @@ __generic_file_splice_read(struct file *
43579 .spd_release = spd_release_page,
43580 };
43581
43582+ pax_track_stack();
43583+
43584 if (splice_grow_spd(pipe, &spd))
43585 return -ENOMEM;
43586
43587@@ -560,7 +562,7 @@ static ssize_t kernel_readv(struct file
43588 old_fs = get_fs();
43589 set_fs(get_ds());
43590 /* The cast to a user pointer is valid due to the set_fs() */
43591- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
43592+ res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
43593 set_fs(old_fs);
43594
43595 return res;
43596@@ -575,7 +577,7 @@ static ssize_t kernel_write(struct file
43597 old_fs = get_fs();
43598 set_fs(get_ds());
43599 /* The cast to a user pointer is valid due to the set_fs() */
43600- res = vfs_write(file, (const char __user *)buf, count, &pos);
43601+ res = vfs_write(file, (__force const char __user *)buf, count, &pos);
43602 set_fs(old_fs);
43603
43604 return res;
43605@@ -603,6 +605,8 @@ ssize_t default_file_splice_read(struct
43606 .spd_release = spd_release_page,
43607 };
43608
43609+ pax_track_stack();
43610+
43611 if (splice_grow_spd(pipe, &spd))
43612 return -ENOMEM;
43613
43614@@ -626,7 +630,7 @@ ssize_t default_file_splice_read(struct
43615 goto err;
43616
43617 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
43618- vec[i].iov_base = (void __user *) page_address(page);
43619+ vec[i].iov_base = (__force void __user *) page_address(page);
43620 vec[i].iov_len = this_len;
43621 spd.pages[i] = page;
43622 spd.nr_pages++;
43623@@ -846,10 +850,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
43624 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
43625 {
43626 while (!pipe->nrbufs) {
43627- if (!pipe->writers)
43628+ if (!atomic_read(&pipe->writers))
43629 return 0;
43630
43631- if (!pipe->waiting_writers && sd->num_spliced)
43632+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
43633 return 0;
43634
43635 if (sd->flags & SPLICE_F_NONBLOCK)
43636@@ -1182,7 +1186,7 @@ ssize_t splice_direct_to_actor(struct fi
43637 * out of the pipe right after the splice_to_pipe(). So set
43638 * PIPE_READERS appropriately.
43639 */
43640- pipe->readers = 1;
43641+ atomic_set(&pipe->readers, 1);
43642
43643 current->splice_pipe = pipe;
43644 }
43645@@ -1619,6 +1623,8 @@ static long vmsplice_to_pipe(struct file
43646 };
43647 long ret;
43648
43649+ pax_track_stack();
43650+
43651 pipe = get_pipe_info(file);
43652 if (!pipe)
43653 return -EBADF;
43654@@ -1734,9 +1740,9 @@ static int ipipe_prep(struct pipe_inode_
43655 ret = -ERESTARTSYS;
43656 break;
43657 }
43658- if (!pipe->writers)
43659+ if (!atomic_read(&pipe->writers))
43660 break;
43661- if (!pipe->waiting_writers) {
43662+ if (!atomic_read(&pipe->waiting_writers)) {
43663 if (flags & SPLICE_F_NONBLOCK) {
43664 ret = -EAGAIN;
43665 break;
43666@@ -1768,7 +1774,7 @@ static int opipe_prep(struct pipe_inode_
43667 pipe_lock(pipe);
43668
43669 while (pipe->nrbufs >= pipe->buffers) {
43670- if (!pipe->readers) {
43671+ if (!atomic_read(&pipe->readers)) {
43672 send_sig(SIGPIPE, current, 0);
43673 ret = -EPIPE;
43674 break;
43675@@ -1781,9 +1787,9 @@ static int opipe_prep(struct pipe_inode_
43676 ret = -ERESTARTSYS;
43677 break;
43678 }
43679- pipe->waiting_writers++;
43680+ atomic_inc(&pipe->waiting_writers);
43681 pipe_wait(pipe);
43682- pipe->waiting_writers--;
43683+ atomic_dec(&pipe->waiting_writers);
43684 }
43685
43686 pipe_unlock(pipe);
43687@@ -1819,14 +1825,14 @@ retry:
43688 pipe_double_lock(ipipe, opipe);
43689
43690 do {
43691- if (!opipe->readers) {
43692+ if (!atomic_read(&opipe->readers)) {
43693 send_sig(SIGPIPE, current, 0);
43694 if (!ret)
43695 ret = -EPIPE;
43696 break;
43697 }
43698
43699- if (!ipipe->nrbufs && !ipipe->writers)
43700+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
43701 break;
43702
43703 /*
43704@@ -1923,7 +1929,7 @@ static int link_pipe(struct pipe_inode_i
43705 pipe_double_lock(ipipe, opipe);
43706
43707 do {
43708- if (!opipe->readers) {
43709+ if (!atomic_read(&opipe->readers)) {
43710 send_sig(SIGPIPE, current, 0);
43711 if (!ret)
43712 ret = -EPIPE;
43713@@ -1968,7 +1974,7 @@ static int link_pipe(struct pipe_inode_i
43714 * return EAGAIN if we have the potential of some data in the
43715 * future, otherwise just return 0
43716 */
43717- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
43718+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
43719 ret = -EAGAIN;
43720
43721 pipe_unlock(ipipe);
43722diff -urNp linux-3.0.4/fs/sysfs/file.c linux-3.0.4/fs/sysfs/file.c
43723--- linux-3.0.4/fs/sysfs/file.c 2011-07-21 22:17:23.000000000 -0400
43724+++ linux-3.0.4/fs/sysfs/file.c 2011-08-23 21:47:56.000000000 -0400
43725@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
43726
43727 struct sysfs_open_dirent {
43728 atomic_t refcnt;
43729- atomic_t event;
43730+ atomic_unchecked_t event;
43731 wait_queue_head_t poll;
43732 struct list_head buffers; /* goes through sysfs_buffer.list */
43733 };
43734@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentr
43735 if (!sysfs_get_active(attr_sd))
43736 return -ENODEV;
43737
43738- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
43739+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
43740 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
43741
43742 sysfs_put_active(attr_sd);
43743@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct
43744 return -ENOMEM;
43745
43746 atomic_set(&new_od->refcnt, 0);
43747- atomic_set(&new_od->event, 1);
43748+ atomic_set_unchecked(&new_od->event, 1);
43749 init_waitqueue_head(&new_od->poll);
43750 INIT_LIST_HEAD(&new_od->buffers);
43751 goto retry;
43752@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct fi
43753
43754 sysfs_put_active(attr_sd);
43755
43756- if (buffer->event != atomic_read(&od->event))
43757+ if (buffer->event != atomic_read_unchecked(&od->event))
43758 goto trigger;
43759
43760 return DEFAULT_POLLMASK;
43761@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_di
43762
43763 od = sd->s_attr.open;
43764 if (od) {
43765- atomic_inc(&od->event);
43766+ atomic_inc_unchecked(&od->event);
43767 wake_up_interruptible(&od->poll);
43768 }
43769
43770diff -urNp linux-3.0.4/fs/sysfs/mount.c linux-3.0.4/fs/sysfs/mount.c
43771--- linux-3.0.4/fs/sysfs/mount.c 2011-07-21 22:17:23.000000000 -0400
43772+++ linux-3.0.4/fs/sysfs/mount.c 2011-08-23 21:48:14.000000000 -0400
43773@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
43774 .s_name = "",
43775 .s_count = ATOMIC_INIT(1),
43776 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
43777+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
43778+ .s_mode = S_IFDIR | S_IRWXU,
43779+#else
43780 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
43781+#endif
43782 .s_ino = 1,
43783 };
43784
43785diff -urNp linux-3.0.4/fs/sysfs/symlink.c linux-3.0.4/fs/sysfs/symlink.c
43786--- linux-3.0.4/fs/sysfs/symlink.c 2011-07-21 22:17:23.000000000 -0400
43787+++ linux-3.0.4/fs/sysfs/symlink.c 2011-08-23 21:47:56.000000000 -0400
43788@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct de
43789
43790 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
43791 {
43792- char *page = nd_get_link(nd);
43793+ const char *page = nd_get_link(nd);
43794 if (!IS_ERR(page))
43795 free_page((unsigned long)page);
43796 }
43797diff -urNp linux-3.0.4/fs/udf/inode.c linux-3.0.4/fs/udf/inode.c
43798--- linux-3.0.4/fs/udf/inode.c 2011-07-21 22:17:23.000000000 -0400
43799+++ linux-3.0.4/fs/udf/inode.c 2011-08-23 21:48:14.000000000 -0400
43800@@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(
43801 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
43802 int lastblock = 0;
43803
43804+ pax_track_stack();
43805+
43806 prev_epos.offset = udf_file_entry_alloc_offset(inode);
43807 prev_epos.block = iinfo->i_location;
43808 prev_epos.bh = NULL;
43809diff -urNp linux-3.0.4/fs/udf/misc.c linux-3.0.4/fs/udf/misc.c
43810--- linux-3.0.4/fs/udf/misc.c 2011-07-21 22:17:23.000000000 -0400
43811+++ linux-3.0.4/fs/udf/misc.c 2011-08-23 21:47:56.000000000 -0400
43812@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
43813
43814 u8 udf_tag_checksum(const struct tag *t)
43815 {
43816- u8 *data = (u8 *)t;
43817+ const u8 *data = (const u8 *)t;
43818 u8 checksum = 0;
43819 int i;
43820 for (i = 0; i < sizeof(struct tag); ++i)
43821diff -urNp linux-3.0.4/fs/utimes.c linux-3.0.4/fs/utimes.c
43822--- linux-3.0.4/fs/utimes.c 2011-07-21 22:17:23.000000000 -0400
43823+++ linux-3.0.4/fs/utimes.c 2011-08-23 21:48:14.000000000 -0400
43824@@ -1,6 +1,7 @@
43825 #include <linux/compiler.h>
43826 #include <linux/file.h>
43827 #include <linux/fs.h>
43828+#include <linux/security.h>
43829 #include <linux/linkage.h>
43830 #include <linux/mount.h>
43831 #include <linux/namei.h>
43832@@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
43833 goto mnt_drop_write_and_out;
43834 }
43835 }
43836+
43837+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
43838+ error = -EACCES;
43839+ goto mnt_drop_write_and_out;
43840+ }
43841+
43842 mutex_lock(&inode->i_mutex);
43843 error = notify_change(path->dentry, &newattrs);
43844 mutex_unlock(&inode->i_mutex);
43845diff -urNp linux-3.0.4/fs/xattr_acl.c linux-3.0.4/fs/xattr_acl.c
43846--- linux-3.0.4/fs/xattr_acl.c 2011-07-21 22:17:23.000000000 -0400
43847+++ linux-3.0.4/fs/xattr_acl.c 2011-08-23 21:47:56.000000000 -0400
43848@@ -17,8 +17,8 @@
43849 struct posix_acl *
43850 posix_acl_from_xattr(const void *value, size_t size)
43851 {
43852- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
43853- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
43854+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
43855+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
43856 int count;
43857 struct posix_acl *acl;
43858 struct posix_acl_entry *acl_e;
43859diff -urNp linux-3.0.4/fs/xattr.c linux-3.0.4/fs/xattr.c
43860--- linux-3.0.4/fs/xattr.c 2011-07-21 22:17:23.000000000 -0400
43861+++ linux-3.0.4/fs/xattr.c 2011-08-23 21:48:14.000000000 -0400
43862@@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
43863 * Extended attribute SET operations
43864 */
43865 static long
43866-setxattr(struct dentry *d, const char __user *name, const void __user *value,
43867+setxattr(struct path *path, const char __user *name, const void __user *value,
43868 size_t size, int flags)
43869 {
43870 int error;
43871@@ -278,7 +278,13 @@ setxattr(struct dentry *d, const char __
43872 return PTR_ERR(kvalue);
43873 }
43874
43875- error = vfs_setxattr(d, kname, kvalue, size, flags);
43876+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
43877+ error = -EACCES;
43878+ goto out;
43879+ }
43880+
43881+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
43882+out:
43883 kfree(kvalue);
43884 return error;
43885 }
43886@@ -295,7 +301,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
43887 return error;
43888 error = mnt_want_write(path.mnt);
43889 if (!error) {
43890- error = setxattr(path.dentry, name, value, size, flags);
43891+ error = setxattr(&path, name, value, size, flags);
43892 mnt_drop_write(path.mnt);
43893 }
43894 path_put(&path);
43895@@ -314,7 +320,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
43896 return error;
43897 error = mnt_want_write(path.mnt);
43898 if (!error) {
43899- error = setxattr(path.dentry, name, value, size, flags);
43900+ error = setxattr(&path, name, value, size, flags);
43901 mnt_drop_write(path.mnt);
43902 }
43903 path_put(&path);
43904@@ -325,17 +331,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
43905 const void __user *,value, size_t, size, int, flags)
43906 {
43907 struct file *f;
43908- struct dentry *dentry;
43909 int error = -EBADF;
43910
43911 f = fget(fd);
43912 if (!f)
43913 return error;
43914- dentry = f->f_path.dentry;
43915- audit_inode(NULL, dentry);
43916+ audit_inode(NULL, f->f_path.dentry);
43917 error = mnt_want_write_file(f);
43918 if (!error) {
43919- error = setxattr(dentry, name, value, size, flags);
43920+ error = setxattr(&f->f_path, name, value, size, flags);
43921 mnt_drop_write(f->f_path.mnt);
43922 }
43923 fput(f);
43924diff -urNp linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c
43925--- linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-07-21 22:17:23.000000000 -0400
43926+++ linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-08-23 21:48:14.000000000 -0400
43927@@ -73,6 +73,7 @@ xfs_compat_ioc_fsgeometry_v1(
43928 xfs_fsop_geom_t fsgeo;
43929 int error;
43930
43931+ memset(&fsgeo, 0, sizeof(fsgeo));
43932 error = xfs_fs_geometry(mp, &fsgeo, 3);
43933 if (error)
43934 return -error;
43935diff -urNp linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c
43936--- linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c 2011-07-21 22:17:23.000000000 -0400
43937+++ linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c 2011-08-23 21:47:56.000000000 -0400
43938@@ -128,7 +128,7 @@ xfs_find_handle(
43939 }
43940
43941 error = -EFAULT;
43942- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
43943+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
43944 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
43945 goto out_put;
43946
43947diff -urNp linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c
43948--- linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c 2011-07-21 22:17:23.000000000 -0400
43949+++ linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c 2011-08-23 21:47:56.000000000 -0400
43950@@ -437,7 +437,7 @@ xfs_vn_put_link(
43951 struct nameidata *nd,
43952 void *p)
43953 {
43954- char *s = nd_get_link(nd);
43955+ const char *s = nd_get_link(nd);
43956
43957 if (!IS_ERR(s))
43958 kfree(s);
43959diff -urNp linux-3.0.4/fs/xfs/xfs_bmap.c linux-3.0.4/fs/xfs/xfs_bmap.c
43960--- linux-3.0.4/fs/xfs/xfs_bmap.c 2011-07-21 22:17:23.000000000 -0400
43961+++ linux-3.0.4/fs/xfs/xfs_bmap.c 2011-08-23 21:47:56.000000000 -0400
43962@@ -253,7 +253,7 @@ xfs_bmap_validate_ret(
43963 int nmap,
43964 int ret_nmap);
43965 #else
43966-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
43967+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
43968 #endif /* DEBUG */
43969
43970 STATIC int
43971diff -urNp linux-3.0.4/fs/xfs/xfs_dir2_sf.c linux-3.0.4/fs/xfs/xfs_dir2_sf.c
43972--- linux-3.0.4/fs/xfs/xfs_dir2_sf.c 2011-07-21 22:17:23.000000000 -0400
43973+++ linux-3.0.4/fs/xfs/xfs_dir2_sf.c 2011-08-23 21:47:56.000000000 -0400
43974@@ -780,7 +780,15 @@ xfs_dir2_sf_getdents(
43975 }
43976
43977 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
43978- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
43979+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
43980+ char name[sfep->namelen];
43981+ memcpy(name, sfep->name, sfep->namelen);
43982+ if (filldir(dirent, name, sfep->namelen,
43983+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
43984+ *offset = off & 0x7fffffff;
43985+ return 0;
43986+ }
43987+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
43988 off & 0x7fffffff, ino, DT_UNKNOWN)) {
43989 *offset = off & 0x7fffffff;
43990 return 0;
43991diff -urNp linux-3.0.4/grsecurity/gracl_alloc.c linux-3.0.4/grsecurity/gracl_alloc.c
43992--- linux-3.0.4/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
43993+++ linux-3.0.4/grsecurity/gracl_alloc.c 2011-08-23 21:48:14.000000000 -0400
43994@@ -0,0 +1,105 @@
43995+#include <linux/kernel.h>
43996+#include <linux/mm.h>
43997+#include <linux/slab.h>
43998+#include <linux/vmalloc.h>
43999+#include <linux/gracl.h>
44000+#include <linux/grsecurity.h>
44001+
44002+static unsigned long alloc_stack_next = 1;
44003+static unsigned long alloc_stack_size = 1;
44004+static void **alloc_stack;
44005+
44006+static __inline__ int
44007+alloc_pop(void)
44008+{
44009+ if (alloc_stack_next == 1)
44010+ return 0;
44011+
44012+ kfree(alloc_stack[alloc_stack_next - 2]);
44013+
44014+ alloc_stack_next--;
44015+
44016+ return 1;
44017+}
44018+
44019+static __inline__ int
44020+alloc_push(void *buf)
44021+{
44022+ if (alloc_stack_next >= alloc_stack_size)
44023+ return 1;
44024+
44025+ alloc_stack[alloc_stack_next - 1] = buf;
44026+
44027+ alloc_stack_next++;
44028+
44029+ return 0;
44030+}
44031+
44032+void *
44033+acl_alloc(unsigned long len)
44034+{
44035+ void *ret = NULL;
44036+
44037+ if (!len || len > PAGE_SIZE)
44038+ goto out;
44039+
44040+ ret = kmalloc(len, GFP_KERNEL);
44041+
44042+ if (ret) {
44043+ if (alloc_push(ret)) {
44044+ kfree(ret);
44045+ ret = NULL;
44046+ }
44047+ }
44048+
44049+out:
44050+ return ret;
44051+}
44052+
44053+void *
44054+acl_alloc_num(unsigned long num, unsigned long len)
44055+{
44056+ if (!len || (num > (PAGE_SIZE / len)))
44057+ return NULL;
44058+
44059+ return acl_alloc(num * len);
44060+}
44061+
44062+void
44063+acl_free_all(void)
44064+{
44065+ if (gr_acl_is_enabled() || !alloc_stack)
44066+ return;
44067+
44068+ while (alloc_pop()) ;
44069+
44070+ if (alloc_stack) {
44071+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
44072+ kfree(alloc_stack);
44073+ else
44074+ vfree(alloc_stack);
44075+ }
44076+
44077+ alloc_stack = NULL;
44078+ alloc_stack_size = 1;
44079+ alloc_stack_next = 1;
44080+
44081+ return;
44082+}
44083+
44084+int
44085+acl_alloc_stack_init(unsigned long size)
44086+{
44087+ if ((size * sizeof (void *)) <= PAGE_SIZE)
44088+ alloc_stack =
44089+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
44090+ else
44091+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
44092+
44093+ alloc_stack_size = size;
44094+
44095+ if (!alloc_stack)
44096+ return 0;
44097+ else
44098+ return 1;
44099+}
44100diff -urNp linux-3.0.4/grsecurity/gracl.c linux-3.0.4/grsecurity/gracl.c
44101--- linux-3.0.4/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
44102+++ linux-3.0.4/grsecurity/gracl.c 2011-08-23 21:48:14.000000000 -0400
44103@@ -0,0 +1,4106 @@
44104+#include <linux/kernel.h>
44105+#include <linux/module.h>
44106+#include <linux/sched.h>
44107+#include <linux/mm.h>
44108+#include <linux/file.h>
44109+#include <linux/fs.h>
44110+#include <linux/namei.h>
44111+#include <linux/mount.h>
44112+#include <linux/tty.h>
44113+#include <linux/proc_fs.h>
44114+#include <linux/lglock.h>
44115+#include <linux/slab.h>
44116+#include <linux/vmalloc.h>
44117+#include <linux/types.h>
44118+#include <linux/sysctl.h>
44119+#include <linux/netdevice.h>
44120+#include <linux/ptrace.h>
44121+#include <linux/gracl.h>
44122+#include <linux/gralloc.h>
44123+#include <linux/grsecurity.h>
44124+#include <linux/grinternal.h>
44125+#include <linux/pid_namespace.h>
44126+#include <linux/fdtable.h>
44127+#include <linux/percpu.h>
44128+
44129+#include <asm/uaccess.h>
44130+#include <asm/errno.h>
44131+#include <asm/mman.h>
44132+
44133+static struct acl_role_db acl_role_set;
44134+static struct name_db name_set;
44135+static struct inodev_db inodev_set;
44136+
44137+/* for keeping track of userspace pointers used for subjects, so we
44138+ can share references in the kernel as well
44139+*/
44140+
44141+static struct path real_root;
44142+
44143+static struct acl_subj_map_db subj_map_set;
44144+
44145+static struct acl_role_label *default_role;
44146+
44147+static struct acl_role_label *role_list;
44148+
44149+static u16 acl_sp_role_value;
44150+
44151+extern char *gr_shared_page[4];
44152+static DEFINE_MUTEX(gr_dev_mutex);
44153+DEFINE_RWLOCK(gr_inode_lock);
44154+
44155+struct gr_arg *gr_usermode;
44156+
44157+static unsigned int gr_status __read_only = GR_STATUS_INIT;
44158+
44159+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
44160+extern void gr_clear_learn_entries(void);
44161+
44162+#ifdef CONFIG_GRKERNSEC_RESLOG
44163+extern void gr_log_resource(const struct task_struct *task,
44164+ const int res, const unsigned long wanted, const int gt);
44165+#endif
44166+
44167+unsigned char *gr_system_salt;
44168+unsigned char *gr_system_sum;
44169+
44170+static struct sprole_pw **acl_special_roles = NULL;
44171+static __u16 num_sprole_pws = 0;
44172+
44173+static struct acl_role_label *kernel_role = NULL;
44174+
44175+static unsigned int gr_auth_attempts = 0;
44176+static unsigned long gr_auth_expires = 0UL;
44177+
44178+#ifdef CONFIG_NET
44179+extern struct vfsmount *sock_mnt;
44180+#endif
44181+
44182+extern struct vfsmount *pipe_mnt;
44183+extern struct vfsmount *shm_mnt;
44184+#ifdef CONFIG_HUGETLBFS
44185+extern struct vfsmount *hugetlbfs_vfsmount;
44186+#endif
44187+
44188+static struct acl_object_label *fakefs_obj_rw;
44189+static struct acl_object_label *fakefs_obj_rwx;
44190+
44191+extern int gr_init_uidset(void);
44192+extern void gr_free_uidset(void);
44193+extern void gr_remove_uid(uid_t uid);
44194+extern int gr_find_uid(uid_t uid);
44195+
44196+DECLARE_BRLOCK(vfsmount_lock);
44197+
44198+__inline__ int
44199+gr_acl_is_enabled(void)
44200+{
44201+ return (gr_status & GR_READY);
44202+}
44203+
44204+#ifdef CONFIG_BTRFS_FS
44205+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
44206+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
44207+#endif
44208+
44209+static inline dev_t __get_dev(const struct dentry *dentry)
44210+{
44211+#ifdef CONFIG_BTRFS_FS
44212+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
44213+ return get_btrfs_dev_from_inode(dentry->d_inode);
44214+ else
44215+#endif
44216+ return dentry->d_inode->i_sb->s_dev;
44217+}
44218+
44219+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
44220+{
44221+ return __get_dev(dentry);
44222+}
44223+
44224+static char gr_task_roletype_to_char(struct task_struct *task)
44225+{
44226+ switch (task->role->roletype &
44227+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
44228+ GR_ROLE_SPECIAL)) {
44229+ case GR_ROLE_DEFAULT:
44230+ return 'D';
44231+ case GR_ROLE_USER:
44232+ return 'U';
44233+ case GR_ROLE_GROUP:
44234+ return 'G';
44235+ case GR_ROLE_SPECIAL:
44236+ return 'S';
44237+ }
44238+
44239+ return 'X';
44240+}
44241+
44242+char gr_roletype_to_char(void)
44243+{
44244+ return gr_task_roletype_to_char(current);
44245+}
44246+
44247+__inline__ int
44248+gr_acl_tpe_check(void)
44249+{
44250+ if (unlikely(!(gr_status & GR_READY)))
44251+ return 0;
44252+ if (current->role->roletype & GR_ROLE_TPE)
44253+ return 1;
44254+ else
44255+ return 0;
44256+}
44257+
44258+int
44259+gr_handle_rawio(const struct inode *inode)
44260+{
44261+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
44262+ if (inode && S_ISBLK(inode->i_mode) &&
44263+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
44264+ !capable(CAP_SYS_RAWIO))
44265+ return 1;
44266+#endif
44267+ return 0;
44268+}
44269+
44270+static int
44271+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
44272+{
44273+ if (likely(lena != lenb))
44274+ return 0;
44275+
44276+ return !memcmp(a, b, lena);
44277+}
44278+
44279+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
44280+{
44281+ *buflen -= namelen;
44282+ if (*buflen < 0)
44283+ return -ENAMETOOLONG;
44284+ *buffer -= namelen;
44285+ memcpy(*buffer, str, namelen);
44286+ return 0;
44287+}
44288+
44289+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
44290+{
44291+ return prepend(buffer, buflen, name->name, name->len);
44292+}
44293+
44294+static int prepend_path(const struct path *path, struct path *root,
44295+ char **buffer, int *buflen)
44296+{
44297+ struct dentry *dentry = path->dentry;
44298+ struct vfsmount *vfsmnt = path->mnt;
44299+ bool slash = false;
44300+ int error = 0;
44301+
44302+ while (dentry != root->dentry || vfsmnt != root->mnt) {
44303+ struct dentry * parent;
44304+
44305+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
44306+ /* Global root? */
44307+ if (vfsmnt->mnt_parent == vfsmnt) {
44308+ goto out;
44309+ }
44310+ dentry = vfsmnt->mnt_mountpoint;
44311+ vfsmnt = vfsmnt->mnt_parent;
44312+ continue;
44313+ }
44314+ parent = dentry->d_parent;
44315+ prefetch(parent);
44316+ spin_lock(&dentry->d_lock);
44317+ error = prepend_name(buffer, buflen, &dentry->d_name);
44318+ spin_unlock(&dentry->d_lock);
44319+ if (!error)
44320+ error = prepend(buffer, buflen, "/", 1);
44321+ if (error)
44322+ break;
44323+
44324+ slash = true;
44325+ dentry = parent;
44326+ }
44327+
44328+out:
44329+ if (!error && !slash)
44330+ error = prepend(buffer, buflen, "/", 1);
44331+
44332+ return error;
44333+}
44334+
44335+/* this must be called with vfsmount_lock and rename_lock held */
44336+
44337+static char *__our_d_path(const struct path *path, struct path *root,
44338+ char *buf, int buflen)
44339+{
44340+ char *res = buf + buflen;
44341+ int error;
44342+
44343+ prepend(&res, &buflen, "\0", 1);
44344+ error = prepend_path(path, root, &res, &buflen);
44345+ if (error)
44346+ return ERR_PTR(error);
44347+
44348+ return res;
44349+}
44350+
44351+static char *
44352+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
44353+{
44354+ char *retval;
44355+
44356+ retval = __our_d_path(path, root, buf, buflen);
44357+ if (unlikely(IS_ERR(retval)))
44358+ retval = strcpy(buf, "<path too long>");
44359+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
44360+ retval[1] = '\0';
44361+
44362+ return retval;
44363+}
44364+
44365+static char *
44366+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
44367+ char *buf, int buflen)
44368+{
44369+ struct path path;
44370+ char *res;
44371+
44372+ path.dentry = (struct dentry *)dentry;
44373+ path.mnt = (struct vfsmount *)vfsmnt;
44374+
44375+ /* we can use real_root.dentry, real_root.mnt, because this is only called
44376+ by the RBAC system */
44377+ res = gen_full_path(&path, &real_root, buf, buflen);
44378+
44379+ return res;
44380+}
44381+
44382+static char *
44383+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
44384+ char *buf, int buflen)
44385+{
44386+ char *res;
44387+ struct path path;
44388+ struct path root;
44389+ struct task_struct *reaper = &init_task;
44390+
44391+ path.dentry = (struct dentry *)dentry;
44392+ path.mnt = (struct vfsmount *)vfsmnt;
44393+
44394+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
44395+ get_fs_root(reaper->fs, &root);
44396+
44397+ write_seqlock(&rename_lock);
44398+ br_read_lock(vfsmount_lock);
44399+ res = gen_full_path(&path, &root, buf, buflen);
44400+ br_read_unlock(vfsmount_lock);
44401+ write_sequnlock(&rename_lock);
44402+
44403+ path_put(&root);
44404+ return res;
44405+}
44406+
44407+static char *
44408+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
44409+{
44410+ char *ret;
44411+ write_seqlock(&rename_lock);
44412+ br_read_lock(vfsmount_lock);
44413+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
44414+ PAGE_SIZE);
44415+ br_read_unlock(vfsmount_lock);
44416+ write_sequnlock(&rename_lock);
44417+ return ret;
44418+}
44419+
44420+char *
44421+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
44422+{
44423+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
44424+ PAGE_SIZE);
44425+}
44426+
44427+char *
44428+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
44429+{
44430+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
44431+ PAGE_SIZE);
44432+}
44433+
44434+char *
44435+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
44436+{
44437+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
44438+ PAGE_SIZE);
44439+}
44440+
44441+char *
44442+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
44443+{
44444+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
44445+ PAGE_SIZE);
44446+}
44447+
44448+char *
44449+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
44450+{
44451+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
44452+ PAGE_SIZE);
44453+}
44454+
44455+__inline__ __u32
44456+to_gr_audit(const __u32 reqmode)
44457+{
44458+ /* masks off auditable permission flags, then shifts them to create
44459+ auditing flags, and adds the special case of append auditing if
44460+ we're requesting write */
44461+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
44462+}
44463+
44464+struct acl_subject_label *
44465+lookup_subject_map(const struct acl_subject_label *userp)
44466+{
44467+ unsigned int index = shash(userp, subj_map_set.s_size);
44468+ struct subject_map *match;
44469+
44470+ match = subj_map_set.s_hash[index];
44471+
44472+ while (match && match->user != userp)
44473+ match = match->next;
44474+
44475+ if (match != NULL)
44476+ return match->kernel;
44477+ else
44478+ return NULL;
44479+}
44480+
44481+static void
44482+insert_subj_map_entry(struct subject_map *subjmap)
44483+{
44484+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
44485+ struct subject_map **curr;
44486+
44487+ subjmap->prev = NULL;
44488+
44489+ curr = &subj_map_set.s_hash[index];
44490+ if (*curr != NULL)
44491+ (*curr)->prev = subjmap;
44492+
44493+ subjmap->next = *curr;
44494+ *curr = subjmap;
44495+
44496+ return;
44497+}
44498+
44499+static struct acl_role_label *
44500+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
44501+ const gid_t gid)
44502+{
44503+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
44504+ struct acl_role_label *match;
44505+ struct role_allowed_ip *ipp;
44506+ unsigned int x;
44507+ u32 curr_ip = task->signal->curr_ip;
44508+
44509+ task->signal->saved_ip = curr_ip;
44510+
44511+ match = acl_role_set.r_hash[index];
44512+
44513+ while (match) {
44514+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
44515+ for (x = 0; x < match->domain_child_num; x++) {
44516+ if (match->domain_children[x] == uid)
44517+ goto found;
44518+ }
44519+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
44520+ break;
44521+ match = match->next;
44522+ }
44523+found:
44524+ if (match == NULL) {
44525+ try_group:
44526+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
44527+ match = acl_role_set.r_hash[index];
44528+
44529+ while (match) {
44530+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
44531+ for (x = 0; x < match->domain_child_num; x++) {
44532+ if (match->domain_children[x] == gid)
44533+ goto found2;
44534+ }
44535+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
44536+ break;
44537+ match = match->next;
44538+ }
44539+found2:
44540+ if (match == NULL)
44541+ match = default_role;
44542+ if (match->allowed_ips == NULL)
44543+ return match;
44544+ else {
44545+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
44546+ if (likely
44547+ ((ntohl(curr_ip) & ipp->netmask) ==
44548+ (ntohl(ipp->addr) & ipp->netmask)))
44549+ return match;
44550+ }
44551+ match = default_role;
44552+ }
44553+ } else if (match->allowed_ips == NULL) {
44554+ return match;
44555+ } else {
44556+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
44557+ if (likely
44558+ ((ntohl(curr_ip) & ipp->netmask) ==
44559+ (ntohl(ipp->addr) & ipp->netmask)))
44560+ return match;
44561+ }
44562+ goto try_group;
44563+ }
44564+
44565+ return match;
44566+}
44567+
44568+struct acl_subject_label *
44569+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
44570+ const struct acl_role_label *role)
44571+{
44572+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
44573+ struct acl_subject_label *match;
44574+
44575+ match = role->subj_hash[index];
44576+
44577+ while (match && (match->inode != ino || match->device != dev ||
44578+ (match->mode & GR_DELETED))) {
44579+ match = match->next;
44580+ }
44581+
44582+ if (match && !(match->mode & GR_DELETED))
44583+ return match;
44584+ else
44585+ return NULL;
44586+}
44587+
44588+struct acl_subject_label *
44589+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
44590+ const struct acl_role_label *role)
44591+{
44592+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
44593+ struct acl_subject_label *match;
44594+
44595+ match = role->subj_hash[index];
44596+
44597+ while (match && (match->inode != ino || match->device != dev ||
44598+ !(match->mode & GR_DELETED))) {
44599+ match = match->next;
44600+ }
44601+
44602+ if (match && (match->mode & GR_DELETED))
44603+ return match;
44604+ else
44605+ return NULL;
44606+}
44607+
44608+static struct acl_object_label *
44609+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
44610+ const struct acl_subject_label *subj)
44611+{
44612+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
44613+ struct acl_object_label *match;
44614+
44615+ match = subj->obj_hash[index];
44616+
44617+ while (match && (match->inode != ino || match->device != dev ||
44618+ (match->mode & GR_DELETED))) {
44619+ match = match->next;
44620+ }
44621+
44622+ if (match && !(match->mode & GR_DELETED))
44623+ return match;
44624+ else
44625+ return NULL;
44626+}
44627+
44628+static struct acl_object_label *
44629+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
44630+ const struct acl_subject_label *subj)
44631+{
44632+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
44633+ struct acl_object_label *match;
44634+
44635+ match = subj->obj_hash[index];
44636+
44637+ while (match && (match->inode != ino || match->device != dev ||
44638+ !(match->mode & GR_DELETED))) {
44639+ match = match->next;
44640+ }
44641+
44642+ if (match && (match->mode & GR_DELETED))
44643+ return match;
44644+
44645+ match = subj->obj_hash[index];
44646+
44647+ while (match && (match->inode != ino || match->device != dev ||
44648+ (match->mode & GR_DELETED))) {
44649+ match = match->next;
44650+ }
44651+
44652+ if (match && !(match->mode & GR_DELETED))
44653+ return match;
44654+ else
44655+ return NULL;
44656+}
44657+
44658+static struct name_entry *
44659+lookup_name_entry(const char *name)
44660+{
44661+ unsigned int len = strlen(name);
44662+ unsigned int key = full_name_hash(name, len);
44663+ unsigned int index = key % name_set.n_size;
44664+ struct name_entry *match;
44665+
44666+ match = name_set.n_hash[index];
44667+
44668+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
44669+ match = match->next;
44670+
44671+ return match;
44672+}
44673+
44674+static struct name_entry *
44675+lookup_name_entry_create(const char *name)
44676+{
44677+ unsigned int len = strlen(name);
44678+ unsigned int key = full_name_hash(name, len);
44679+ unsigned int index = key % name_set.n_size;
44680+ struct name_entry *match;
44681+
44682+ match = name_set.n_hash[index];
44683+
44684+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
44685+ !match->deleted))
44686+ match = match->next;
44687+
44688+ if (match && match->deleted)
44689+ return match;
44690+
44691+ match = name_set.n_hash[index];
44692+
44693+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
44694+ match->deleted))
44695+ match = match->next;
44696+
44697+ if (match && !match->deleted)
44698+ return match;
44699+ else
44700+ return NULL;
44701+}
44702+
44703+static struct inodev_entry *
44704+lookup_inodev_entry(const ino_t ino, const dev_t dev)
44705+{
44706+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
44707+ struct inodev_entry *match;
44708+
44709+ match = inodev_set.i_hash[index];
44710+
44711+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
44712+ match = match->next;
44713+
44714+ return match;
44715+}
44716+
44717+static void
44718+insert_inodev_entry(struct inodev_entry *entry)
44719+{
44720+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
44721+ inodev_set.i_size);
44722+ struct inodev_entry **curr;
44723+
44724+ entry->prev = NULL;
44725+
44726+ curr = &inodev_set.i_hash[index];
44727+ if (*curr != NULL)
44728+ (*curr)->prev = entry;
44729+
44730+ entry->next = *curr;
44731+ *curr = entry;
44732+
44733+ return;
44734+}
44735+
44736+static void
44737+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
44738+{
44739+ unsigned int index =
44740+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
44741+ struct acl_role_label **curr;
44742+ struct acl_role_label *tmp;
44743+
44744+ curr = &acl_role_set.r_hash[index];
44745+
44746+ /* if role was already inserted due to domains and already has
44747+ a role in the same bucket as it attached, then we need to
44748+ combine these two buckets
44749+ */
44750+ if (role->next) {
44751+ tmp = role->next;
44752+ while (tmp->next)
44753+ tmp = tmp->next;
44754+ tmp->next = *curr;
44755+ } else
44756+ role->next = *curr;
44757+ *curr = role;
44758+
44759+ return;
44760+}
44761+
44762+static void
44763+insert_acl_role_label(struct acl_role_label *role)
44764+{
44765+ int i;
44766+
44767+ if (role_list == NULL) {
44768+ role_list = role;
44769+ role->prev = NULL;
44770+ } else {
44771+ role->prev = role_list;
44772+ role_list = role;
44773+ }
44774+
44775+ /* used for hash chains */
44776+ role->next = NULL;
44777+
44778+ if (role->roletype & GR_ROLE_DOMAIN) {
44779+ for (i = 0; i < role->domain_child_num; i++)
44780+ __insert_acl_role_label(role, role->domain_children[i]);
44781+ } else
44782+ __insert_acl_role_label(role, role->uidgid);
44783+}
44784+
44785+static int
44786+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
44787+{
44788+ struct name_entry **curr, *nentry;
44789+ struct inodev_entry *ientry;
44790+ unsigned int len = strlen(name);
44791+ unsigned int key = full_name_hash(name, len);
44792+ unsigned int index = key % name_set.n_size;
44793+
44794+ curr = &name_set.n_hash[index];
44795+
44796+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
44797+ curr = &((*curr)->next);
44798+
44799+ if (*curr != NULL)
44800+ return 1;
44801+
44802+ nentry = acl_alloc(sizeof (struct name_entry));
44803+ if (nentry == NULL)
44804+ return 0;
44805+ ientry = acl_alloc(sizeof (struct inodev_entry));
44806+ if (ientry == NULL)
44807+ return 0;
44808+ ientry->nentry = nentry;
44809+
44810+ nentry->key = key;
44811+ nentry->name = name;
44812+ nentry->inode = inode;
44813+ nentry->device = device;
44814+ nentry->len = len;
44815+ nentry->deleted = deleted;
44816+
44817+ nentry->prev = NULL;
44818+ curr = &name_set.n_hash[index];
44819+ if (*curr != NULL)
44820+ (*curr)->prev = nentry;
44821+ nentry->next = *curr;
44822+ *curr = nentry;
44823+
44824+ /* insert us into the table searchable by inode/dev */
44825+ insert_inodev_entry(ientry);
44826+
44827+ return 1;
44828+}
44829+
44830+static void
44831+insert_acl_obj_label(struct acl_object_label *obj,
44832+ struct acl_subject_label *subj)
44833+{
44834+ unsigned int index =
44835+ fhash(obj->inode, obj->device, subj->obj_hash_size);
44836+ struct acl_object_label **curr;
44837+
44838+
44839+ obj->prev = NULL;
44840+
44841+ curr = &subj->obj_hash[index];
44842+ if (*curr != NULL)
44843+ (*curr)->prev = obj;
44844+
44845+ obj->next = *curr;
44846+ *curr = obj;
44847+
44848+ return;
44849+}
44850+
44851+static void
44852+insert_acl_subj_label(struct acl_subject_label *obj,
44853+ struct acl_role_label *role)
44854+{
44855+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
44856+ struct acl_subject_label **curr;
44857+
44858+ obj->prev = NULL;
44859+
44860+ curr = &role->subj_hash[index];
44861+ if (*curr != NULL)
44862+ (*curr)->prev = obj;
44863+
44864+ obj->next = *curr;
44865+ *curr = obj;
44866+
44867+ return;
44868+}
44869+
44870+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
44871+
44872+static void *
44873+create_table(__u32 * len, int elementsize)
44874+{
44875+ unsigned int table_sizes[] = {
44876+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
44877+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
44878+ 4194301, 8388593, 16777213, 33554393, 67108859
44879+ };
44880+ void *newtable = NULL;
44881+ unsigned int pwr = 0;
44882+
44883+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
44884+ table_sizes[pwr] <= *len)
44885+ pwr++;
44886+
44887+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
44888+ return newtable;
44889+
44890+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
44891+ newtable =
44892+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
44893+ else
44894+ newtable = vmalloc(table_sizes[pwr] * elementsize);
44895+
44896+ *len = table_sizes[pwr];
44897+
44898+ return newtable;
44899+}
44900+
44901+static int
44902+init_variables(const struct gr_arg *arg)
44903+{
44904+ struct task_struct *reaper = &init_task;
44905+ unsigned int stacksize;
44906+
44907+ subj_map_set.s_size = arg->role_db.num_subjects;
44908+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
44909+ name_set.n_size = arg->role_db.num_objects;
44910+ inodev_set.i_size = arg->role_db.num_objects;
44911+
44912+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
44913+ !name_set.n_size || !inodev_set.i_size)
44914+ return 1;
44915+
44916+ if (!gr_init_uidset())
44917+ return 1;
44918+
44919+ /* set up the stack that holds allocation info */
44920+
44921+ stacksize = arg->role_db.num_pointers + 5;
44922+
44923+ if (!acl_alloc_stack_init(stacksize))
44924+ return 1;
44925+
44926+ /* grab reference for the real root dentry and vfsmount */
44927+ get_fs_root(reaper->fs, &real_root);
44928+
44929+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44930+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
44931+#endif
44932+
44933+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
44934+ if (fakefs_obj_rw == NULL)
44935+ return 1;
44936+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
44937+
44938+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
44939+ if (fakefs_obj_rwx == NULL)
44940+ return 1;
44941+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
44942+
44943+ subj_map_set.s_hash =
44944+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
44945+ acl_role_set.r_hash =
44946+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
44947+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
44948+ inodev_set.i_hash =
44949+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
44950+
44951+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
44952+ !name_set.n_hash || !inodev_set.i_hash)
44953+ return 1;
44954+
44955+ memset(subj_map_set.s_hash, 0,
44956+ sizeof(struct subject_map *) * subj_map_set.s_size);
44957+ memset(acl_role_set.r_hash, 0,
44958+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
44959+ memset(name_set.n_hash, 0,
44960+ sizeof (struct name_entry *) * name_set.n_size);
44961+ memset(inodev_set.i_hash, 0,
44962+ sizeof (struct inodev_entry *) * inodev_set.i_size);
44963+
44964+ return 0;
44965+}
44966+
44967+/* free information not needed after startup
44968+ currently contains user->kernel pointer mappings for subjects
44969+*/
44970+
44971+static void
44972+free_init_variables(void)
44973+{
44974+ __u32 i;
44975+
44976+ if (subj_map_set.s_hash) {
44977+ for (i = 0; i < subj_map_set.s_size; i++) {
44978+ if (subj_map_set.s_hash[i]) {
44979+ kfree(subj_map_set.s_hash[i]);
44980+ subj_map_set.s_hash[i] = NULL;
44981+ }
44982+ }
44983+
44984+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
44985+ PAGE_SIZE)
44986+ kfree(subj_map_set.s_hash);
44987+ else
44988+ vfree(subj_map_set.s_hash);
44989+ }
44990+
44991+ return;
44992+}
44993+
44994+static void
44995+free_variables(void)
44996+{
44997+ struct acl_subject_label *s;
44998+ struct acl_role_label *r;
44999+ struct task_struct *task, *task2;
45000+ unsigned int x;
45001+
45002+ gr_clear_learn_entries();
45003+
45004+ read_lock(&tasklist_lock);
45005+ do_each_thread(task2, task) {
45006+ task->acl_sp_role = 0;
45007+ task->acl_role_id = 0;
45008+ task->acl = NULL;
45009+ task->role = NULL;
45010+ } while_each_thread(task2, task);
45011+ read_unlock(&tasklist_lock);
45012+
45013+ /* release the reference to the real root dentry and vfsmount */
45014+ path_put(&real_root);
45015+
45016+ /* free all object hash tables */
45017+
45018+ FOR_EACH_ROLE_START(r)
45019+ if (r->subj_hash == NULL)
45020+ goto next_role;
45021+ FOR_EACH_SUBJECT_START(r, s, x)
45022+ if (s->obj_hash == NULL)
45023+ break;
45024+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
45025+ kfree(s->obj_hash);
45026+ else
45027+ vfree(s->obj_hash);
45028+ FOR_EACH_SUBJECT_END(s, x)
45029+ FOR_EACH_NESTED_SUBJECT_START(r, s)
45030+ if (s->obj_hash == NULL)
45031+ break;
45032+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
45033+ kfree(s->obj_hash);
45034+ else
45035+ vfree(s->obj_hash);
45036+ FOR_EACH_NESTED_SUBJECT_END(s)
45037+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
45038+ kfree(r->subj_hash);
45039+ else
45040+ vfree(r->subj_hash);
45041+ r->subj_hash = NULL;
45042+next_role:
45043+ FOR_EACH_ROLE_END(r)
45044+
45045+ acl_free_all();
45046+
45047+ if (acl_role_set.r_hash) {
45048+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
45049+ PAGE_SIZE)
45050+ kfree(acl_role_set.r_hash);
45051+ else
45052+ vfree(acl_role_set.r_hash);
45053+ }
45054+ if (name_set.n_hash) {
45055+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
45056+ PAGE_SIZE)
45057+ kfree(name_set.n_hash);
45058+ else
45059+ vfree(name_set.n_hash);
45060+ }
45061+
45062+ if (inodev_set.i_hash) {
45063+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
45064+ PAGE_SIZE)
45065+ kfree(inodev_set.i_hash);
45066+ else
45067+ vfree(inodev_set.i_hash);
45068+ }
45069+
45070+ gr_free_uidset();
45071+
45072+ memset(&name_set, 0, sizeof (struct name_db));
45073+ memset(&inodev_set, 0, sizeof (struct inodev_db));
45074+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
45075+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
45076+
45077+ default_role = NULL;
45078+ role_list = NULL;
45079+
45080+ return;
45081+}
45082+
45083+static __u32
45084+count_user_objs(struct acl_object_label *userp)
45085+{
45086+ struct acl_object_label o_tmp;
45087+ __u32 num = 0;
45088+
45089+ while (userp) {
45090+ if (copy_from_user(&o_tmp, userp,
45091+ sizeof (struct acl_object_label)))
45092+ break;
45093+
45094+ userp = o_tmp.prev;
45095+ num++;
45096+ }
45097+
45098+ return num;
45099+}
45100+
45101+static struct acl_subject_label *
45102+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
45103+
45104+static int
45105+copy_user_glob(struct acl_object_label *obj)
45106+{
45107+ struct acl_object_label *g_tmp, **guser;
45108+ unsigned int len;
45109+ char *tmp;
45110+
45111+ if (obj->globbed == NULL)
45112+ return 0;
45113+
45114+ guser = &obj->globbed;
45115+ while (*guser) {
45116+ g_tmp = (struct acl_object_label *)
45117+ acl_alloc(sizeof (struct acl_object_label));
45118+ if (g_tmp == NULL)
45119+ return -ENOMEM;
45120+
45121+ if (copy_from_user(g_tmp, *guser,
45122+ sizeof (struct acl_object_label)))
45123+ return -EFAULT;
45124+
45125+ len = strnlen_user(g_tmp->filename, PATH_MAX);
45126+
45127+ if (!len || len >= PATH_MAX)
45128+ return -EINVAL;
45129+
45130+ if ((tmp = (char *) acl_alloc(len)) == NULL)
45131+ return -ENOMEM;
45132+
45133+ if (copy_from_user(tmp, g_tmp->filename, len))
45134+ return -EFAULT;
45135+ tmp[len-1] = '\0';
45136+ g_tmp->filename = tmp;
45137+
45138+ *guser = g_tmp;
45139+ guser = &(g_tmp->next);
45140+ }
45141+
45142+ return 0;
45143+}
45144+
45145+static int
45146+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
45147+ struct acl_role_label *role)
45148+{
45149+ struct acl_object_label *o_tmp;
45150+ unsigned int len;
45151+ int ret;
45152+ char *tmp;
45153+
45154+ while (userp) {
45155+ if ((o_tmp = (struct acl_object_label *)
45156+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
45157+ return -ENOMEM;
45158+
45159+ if (copy_from_user(o_tmp, userp,
45160+ sizeof (struct acl_object_label)))
45161+ return -EFAULT;
45162+
45163+ userp = o_tmp->prev;
45164+
45165+ len = strnlen_user(o_tmp->filename, PATH_MAX);
45166+
45167+ if (!len || len >= PATH_MAX)
45168+ return -EINVAL;
45169+
45170+ if ((tmp = (char *) acl_alloc(len)) == NULL)
45171+ return -ENOMEM;
45172+
45173+ if (copy_from_user(tmp, o_tmp->filename, len))
45174+ return -EFAULT;
45175+ tmp[len-1] = '\0';
45176+ o_tmp->filename = tmp;
45177+
45178+ insert_acl_obj_label(o_tmp, subj);
45179+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
45180+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
45181+ return -ENOMEM;
45182+
45183+ ret = copy_user_glob(o_tmp);
45184+ if (ret)
45185+ return ret;
45186+
45187+ if (o_tmp->nested) {
45188+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
45189+ if (IS_ERR(o_tmp->nested))
45190+ return PTR_ERR(o_tmp->nested);
45191+
45192+ /* insert into nested subject list */
45193+ o_tmp->nested->next = role->hash->first;
45194+ role->hash->first = o_tmp->nested;
45195+ }
45196+ }
45197+
45198+ return 0;
45199+}
45200+
45201+static __u32
45202+count_user_subjs(struct acl_subject_label *userp)
45203+{
45204+ struct acl_subject_label s_tmp;
45205+ __u32 num = 0;
45206+
45207+ while (userp) {
45208+ if (copy_from_user(&s_tmp, userp,
45209+ sizeof (struct acl_subject_label)))
45210+ break;
45211+
45212+ userp = s_tmp.prev;
45213+ /* do not count nested subjects against this count, since
45214+ they are not included in the hash table, but are
45215+ attached to objects. We have already counted
45216+ the subjects in userspace for the allocation
45217+ stack
45218+ */
45219+ if (!(s_tmp.mode & GR_NESTED))
45220+ num++;
45221+ }
45222+
45223+ return num;
45224+}
45225+
45226+static int
45227+copy_user_allowedips(struct acl_role_label *rolep)
45228+{
45229+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
45230+
45231+ ruserip = rolep->allowed_ips;
45232+
45233+ while (ruserip) {
45234+ rlast = rtmp;
45235+
45236+ if ((rtmp = (struct role_allowed_ip *)
45237+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
45238+ return -ENOMEM;
45239+
45240+ if (copy_from_user(rtmp, ruserip,
45241+ sizeof (struct role_allowed_ip)))
45242+ return -EFAULT;
45243+
45244+ ruserip = rtmp->prev;
45245+
45246+ if (!rlast) {
45247+ rtmp->prev = NULL;
45248+ rolep->allowed_ips = rtmp;
45249+ } else {
45250+ rlast->next = rtmp;
45251+ rtmp->prev = rlast;
45252+ }
45253+
45254+ if (!ruserip)
45255+ rtmp->next = NULL;
45256+ }
45257+
45258+ return 0;
45259+}
45260+
45261+static int
45262+copy_user_transitions(struct acl_role_label *rolep)
45263+{
45264+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
45265+
45266+ unsigned int len;
45267+ char *tmp;
45268+
45269+ rusertp = rolep->transitions;
45270+
45271+ while (rusertp) {
45272+ rlast = rtmp;
45273+
45274+ if ((rtmp = (struct role_transition *)
45275+ acl_alloc(sizeof (struct role_transition))) == NULL)
45276+ return -ENOMEM;
45277+
45278+ if (copy_from_user(rtmp, rusertp,
45279+ sizeof (struct role_transition)))
45280+ return -EFAULT;
45281+
45282+ rusertp = rtmp->prev;
45283+
45284+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
45285+
45286+ if (!len || len >= GR_SPROLE_LEN)
45287+ return -EINVAL;
45288+
45289+ if ((tmp = (char *) acl_alloc(len)) == NULL)
45290+ return -ENOMEM;
45291+
45292+ if (copy_from_user(tmp, rtmp->rolename, len))
45293+ return -EFAULT;
45294+ tmp[len-1] = '\0';
45295+ rtmp->rolename = tmp;
45296+
45297+ if (!rlast) {
45298+ rtmp->prev = NULL;
45299+ rolep->transitions = rtmp;
45300+ } else {
45301+ rlast->next = rtmp;
45302+ rtmp->prev = rlast;
45303+ }
45304+
45305+ if (!rusertp)
45306+ rtmp->next = NULL;
45307+ }
45308+
45309+ return 0;
45310+}
45311+
45312+static struct acl_subject_label *
45313+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
45314+{
45315+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
45316+ unsigned int len;
45317+ char *tmp;
45318+ __u32 num_objs;
45319+ struct acl_ip_label **i_tmp, *i_utmp2;
45320+ struct gr_hash_struct ghash;
45321+ struct subject_map *subjmap;
45322+ unsigned int i_num;
45323+ int err;
45324+
45325+ s_tmp = lookup_subject_map(userp);
45326+
45327+ /* we've already copied this subject into the kernel, just return
45328+ the reference to it, and don't copy it over again
45329+ */
45330+ if (s_tmp)
45331+ return(s_tmp);
45332+
45333+ if ((s_tmp = (struct acl_subject_label *)
45334+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
45335+ return ERR_PTR(-ENOMEM);
45336+
45337+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
45338+ if (subjmap == NULL)
45339+ return ERR_PTR(-ENOMEM);
45340+
45341+ subjmap->user = userp;
45342+ subjmap->kernel = s_tmp;
45343+ insert_subj_map_entry(subjmap);
45344+
45345+ if (copy_from_user(s_tmp, userp,
45346+ sizeof (struct acl_subject_label)))
45347+ return ERR_PTR(-EFAULT);
45348+
45349+ len = strnlen_user(s_tmp->filename, PATH_MAX);
45350+
45351+ if (!len || len >= PATH_MAX)
45352+ return ERR_PTR(-EINVAL);
45353+
45354+ if ((tmp = (char *) acl_alloc(len)) == NULL)
45355+ return ERR_PTR(-ENOMEM);
45356+
45357+ if (copy_from_user(tmp, s_tmp->filename, len))
45358+ return ERR_PTR(-EFAULT);
45359+ tmp[len-1] = '\0';
45360+ s_tmp->filename = tmp;
45361+
45362+ if (!strcmp(s_tmp->filename, "/"))
45363+ role->root_label = s_tmp;
45364+
45365+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
45366+ return ERR_PTR(-EFAULT);
45367+
45368+ /* copy user and group transition tables */
45369+
45370+ if (s_tmp->user_trans_num) {
45371+ uid_t *uidlist;
45372+
45373+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
45374+ if (uidlist == NULL)
45375+ return ERR_PTR(-ENOMEM);
45376+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
45377+ return ERR_PTR(-EFAULT);
45378+
45379+ s_tmp->user_transitions = uidlist;
45380+ }
45381+
45382+ if (s_tmp->group_trans_num) {
45383+ gid_t *gidlist;
45384+
45385+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
45386+ if (gidlist == NULL)
45387+ return ERR_PTR(-ENOMEM);
45388+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
45389+ return ERR_PTR(-EFAULT);
45390+
45391+ s_tmp->group_transitions = gidlist;
45392+ }
45393+
45394+ /* set up object hash table */
45395+ num_objs = count_user_objs(ghash.first);
45396+
45397+ s_tmp->obj_hash_size = num_objs;
45398+ s_tmp->obj_hash =
45399+ (struct acl_object_label **)
45400+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
45401+
45402+ if (!s_tmp->obj_hash)
45403+ return ERR_PTR(-ENOMEM);
45404+
45405+ memset(s_tmp->obj_hash, 0,
45406+ s_tmp->obj_hash_size *
45407+ sizeof (struct acl_object_label *));
45408+
45409+ /* add in objects */
45410+ err = copy_user_objs(ghash.first, s_tmp, role);
45411+
45412+ if (err)
45413+ return ERR_PTR(err);
45414+
45415+ /* set pointer for parent subject */
45416+ if (s_tmp->parent_subject) {
45417+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
45418+
45419+ if (IS_ERR(s_tmp2))
45420+ return s_tmp2;
45421+
45422+ s_tmp->parent_subject = s_tmp2;
45423+ }
45424+
45425+ /* add in ip acls */
45426+
45427+ if (!s_tmp->ip_num) {
45428+ s_tmp->ips = NULL;
45429+ goto insert;
45430+ }
45431+
45432+ i_tmp =
45433+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
45434+ sizeof (struct acl_ip_label *));
45435+
45436+ if (!i_tmp)
45437+ return ERR_PTR(-ENOMEM);
45438+
45439+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
45440+ *(i_tmp + i_num) =
45441+ (struct acl_ip_label *)
45442+ acl_alloc(sizeof (struct acl_ip_label));
45443+ if (!*(i_tmp + i_num))
45444+ return ERR_PTR(-ENOMEM);
45445+
45446+ if (copy_from_user
45447+ (&i_utmp2, s_tmp->ips + i_num,
45448+ sizeof (struct acl_ip_label *)))
45449+ return ERR_PTR(-EFAULT);
45450+
45451+ if (copy_from_user
45452+ (*(i_tmp + i_num), i_utmp2,
45453+ sizeof (struct acl_ip_label)))
45454+ return ERR_PTR(-EFAULT);
45455+
45456+ if ((*(i_tmp + i_num))->iface == NULL)
45457+ continue;
45458+
45459+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
45460+ if (!len || len >= IFNAMSIZ)
45461+ return ERR_PTR(-EINVAL);
45462+ tmp = acl_alloc(len);
45463+ if (tmp == NULL)
45464+ return ERR_PTR(-ENOMEM);
45465+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
45466+ return ERR_PTR(-EFAULT);
45467+ (*(i_tmp + i_num))->iface = tmp;
45468+ }
45469+
45470+ s_tmp->ips = i_tmp;
45471+
45472+insert:
45473+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
45474+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
45475+ return ERR_PTR(-ENOMEM);
45476+
45477+ return s_tmp;
45478+}
45479+
45480+static int
45481+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
45482+{
45483+ struct acl_subject_label s_pre;
45484+ struct acl_subject_label * ret;
45485+ int err;
45486+
45487+ while (userp) {
45488+ if (copy_from_user(&s_pre, userp,
45489+ sizeof (struct acl_subject_label)))
45490+ return -EFAULT;
45491+
45492+ /* do not add nested subjects here, add
45493+ while parsing objects
45494+ */
45495+
45496+ if (s_pre.mode & GR_NESTED) {
45497+ userp = s_pre.prev;
45498+ continue;
45499+ }
45500+
45501+ ret = do_copy_user_subj(userp, role);
45502+
45503+ err = PTR_ERR(ret);
45504+ if (IS_ERR(ret))
45505+ return err;
45506+
45507+ insert_acl_subj_label(ret, role);
45508+
45509+ userp = s_pre.prev;
45510+ }
45511+
45512+ return 0;
45513+}
45514+
45515+static int
45516+copy_user_acl(struct gr_arg *arg)
45517+{
45518+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
45519+ struct sprole_pw *sptmp;
45520+ struct gr_hash_struct *ghash;
45521+ uid_t *domainlist;
45522+ unsigned int r_num;
45523+ unsigned int len;
45524+ char *tmp;
45525+ int err = 0;
45526+ __u16 i;
45527+ __u32 num_subjs;
45528+
45529+ /* we need a default and kernel role */
45530+ if (arg->role_db.num_roles < 2)
45531+ return -EINVAL;
45532+
45533+ /* copy special role authentication info from userspace */
45534+
45535+ num_sprole_pws = arg->num_sprole_pws;
45536+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
45537+
45538+ if (!acl_special_roles) {
45539+ err = -ENOMEM;
45540+ goto cleanup;
45541+ }
45542+
45543+ for (i = 0; i < num_sprole_pws; i++) {
45544+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
45545+ if (!sptmp) {
45546+ err = -ENOMEM;
45547+ goto cleanup;
45548+ }
45549+ if (copy_from_user(sptmp, arg->sprole_pws + i,
45550+ sizeof (struct sprole_pw))) {
45551+ err = -EFAULT;
45552+ goto cleanup;
45553+ }
45554+
45555+ len =
45556+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
45557+
45558+ if (!len || len >= GR_SPROLE_LEN) {
45559+ err = -EINVAL;
45560+ goto cleanup;
45561+ }
45562+
45563+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
45564+ err = -ENOMEM;
45565+ goto cleanup;
45566+ }
45567+
45568+ if (copy_from_user(tmp, sptmp->rolename, len)) {
45569+ err = -EFAULT;
45570+ goto cleanup;
45571+ }
45572+ tmp[len-1] = '\0';
45573+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
45574+ printk(KERN_ALERT "Copying special role %s\n", tmp);
45575+#endif
45576+ sptmp->rolename = tmp;
45577+ acl_special_roles[i] = sptmp;
45578+ }
45579+
45580+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
45581+
45582+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
45583+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
45584+
45585+ if (!r_tmp) {
45586+ err = -ENOMEM;
45587+ goto cleanup;
45588+ }
45589+
45590+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
45591+ sizeof (struct acl_role_label *))) {
45592+ err = -EFAULT;
45593+ goto cleanup;
45594+ }
45595+
45596+ if (copy_from_user(r_tmp, r_utmp2,
45597+ sizeof (struct acl_role_label))) {
45598+ err = -EFAULT;
45599+ goto cleanup;
45600+ }
45601+
45602+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
45603+
45604+ if (!len || len >= PATH_MAX) {
45605+ err = -EINVAL;
45606+ goto cleanup;
45607+ }
45608+
45609+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
45610+ err = -ENOMEM;
45611+ goto cleanup;
45612+ }
45613+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
45614+ err = -EFAULT;
45615+ goto cleanup;
45616+ }
45617+ tmp[len-1] = '\0';
45618+ r_tmp->rolename = tmp;
45619+
45620+ if (!strcmp(r_tmp->rolename, "default")
45621+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
45622+ default_role = r_tmp;
45623+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
45624+ kernel_role = r_tmp;
45625+ }
45626+
45627+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
45628+ err = -ENOMEM;
45629+ goto cleanup;
45630+ }
45631+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
45632+ err = -EFAULT;
45633+ goto cleanup;
45634+ }
45635+
45636+ r_tmp->hash = ghash;
45637+
45638+ num_subjs = count_user_subjs(r_tmp->hash->first);
45639+
45640+ r_tmp->subj_hash_size = num_subjs;
45641+ r_tmp->subj_hash =
45642+ (struct acl_subject_label **)
45643+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
45644+
45645+ if (!r_tmp->subj_hash) {
45646+ err = -ENOMEM;
45647+ goto cleanup;
45648+ }
45649+
45650+ err = copy_user_allowedips(r_tmp);
45651+ if (err)
45652+ goto cleanup;
45653+
45654+ /* copy domain info */
45655+ if (r_tmp->domain_children != NULL) {
45656+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
45657+ if (domainlist == NULL) {
45658+ err = -ENOMEM;
45659+ goto cleanup;
45660+ }
45661+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
45662+ err = -EFAULT;
45663+ goto cleanup;
45664+ }
45665+ r_tmp->domain_children = domainlist;
45666+ }
45667+
45668+ err = copy_user_transitions(r_tmp);
45669+ if (err)
45670+ goto cleanup;
45671+
45672+ memset(r_tmp->subj_hash, 0,
45673+ r_tmp->subj_hash_size *
45674+ sizeof (struct acl_subject_label *));
45675+
45676+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
45677+
45678+ if (err)
45679+ goto cleanup;
45680+
45681+ /* set nested subject list to null */
45682+ r_tmp->hash->first = NULL;
45683+
45684+ insert_acl_role_label(r_tmp);
45685+ }
45686+
45687+ goto return_err;
45688+ cleanup:
45689+ free_variables();
45690+ return_err:
45691+ return err;
45692+
45693+}
45694+
45695+static int
45696+gracl_init(struct gr_arg *args)
45697+{
45698+ int error = 0;
45699+
45700+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
45701+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
45702+
45703+ if (init_variables(args)) {
45704+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
45705+ error = -ENOMEM;
45706+ free_variables();
45707+ goto out;
45708+ }
45709+
45710+ error = copy_user_acl(args);
45711+ free_init_variables();
45712+ if (error) {
45713+ free_variables();
45714+ goto out;
45715+ }
45716+
45717+ if ((error = gr_set_acls(0))) {
45718+ free_variables();
45719+ goto out;
45720+ }
45721+
45722+ pax_open_kernel();
45723+ gr_status |= GR_READY;
45724+ pax_close_kernel();
45725+
45726+ out:
45727+ return error;
45728+}
45729+
45730+/* derived from glibc fnmatch() 0: match, 1: no match*/
45731+
45732+static int
45733+glob_match(const char *p, const char *n)
45734+{
45735+ char c;
45736+
45737+ while ((c = *p++) != '\0') {
45738+ switch (c) {
45739+ case '?':
45740+ if (*n == '\0')
45741+ return 1;
45742+ else if (*n == '/')
45743+ return 1;
45744+ break;
45745+ case '\\':
45746+ if (*n != c)
45747+ return 1;
45748+ break;
45749+ case '*':
45750+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
45751+ if (*n == '/')
45752+ return 1;
45753+ else if (c == '?') {
45754+ if (*n == '\0')
45755+ return 1;
45756+ else
45757+ ++n;
45758+ }
45759+ }
45760+ if (c == '\0') {
45761+ return 0;
45762+ } else {
45763+ const char *endp;
45764+
45765+ if ((endp = strchr(n, '/')) == NULL)
45766+ endp = n + strlen(n);
45767+
45768+ if (c == '[') {
45769+ for (--p; n < endp; ++n)
45770+ if (!glob_match(p, n))
45771+ return 0;
45772+ } else if (c == '/') {
45773+ while (*n != '\0' && *n != '/')
45774+ ++n;
45775+ if (*n == '/' && !glob_match(p, n + 1))
45776+ return 0;
45777+ } else {
45778+ for (--p; n < endp; ++n)
45779+ if (*n == c && !glob_match(p, n))
45780+ return 0;
45781+ }
45782+
45783+ return 1;
45784+ }
45785+ case '[':
45786+ {
45787+ int not;
45788+ char cold;
45789+
45790+ if (*n == '\0' || *n == '/')
45791+ return 1;
45792+
45793+ not = (*p == '!' || *p == '^');
45794+ if (not)
45795+ ++p;
45796+
45797+ c = *p++;
45798+ for (;;) {
45799+ unsigned char fn = (unsigned char)*n;
45800+
45801+ if (c == '\0')
45802+ return 1;
45803+ else {
45804+ if (c == fn)
45805+ goto matched;
45806+ cold = c;
45807+ c = *p++;
45808+
45809+ if (c == '-' && *p != ']') {
45810+ unsigned char cend = *p++;
45811+
45812+ if (cend == '\0')
45813+ return 1;
45814+
45815+ if (cold <= fn && fn <= cend)
45816+ goto matched;
45817+
45818+ c = *p++;
45819+ }
45820+ }
45821+
45822+ if (c == ']')
45823+ break;
45824+ }
45825+ if (!not)
45826+ return 1;
45827+ break;
45828+ matched:
45829+ while (c != ']') {
45830+ if (c == '\0')
45831+ return 1;
45832+
45833+ c = *p++;
45834+ }
45835+ if (not)
45836+ return 1;
45837+ }
45838+ break;
45839+ default:
45840+ if (c != *n)
45841+ return 1;
45842+ }
45843+
45844+ ++n;
45845+ }
45846+
45847+ if (*n == '\0')
45848+ return 0;
45849+
45850+ if (*n == '/')
45851+ return 0;
45852+
45853+ return 1;
45854+}
45855+
45856+static struct acl_object_label *
45857+chk_glob_label(struct acl_object_label *globbed,
45858+ struct dentry *dentry, struct vfsmount *mnt, char **path)
45859+{
45860+ struct acl_object_label *tmp;
45861+
45862+ if (*path == NULL)
45863+ *path = gr_to_filename_nolock(dentry, mnt);
45864+
45865+ tmp = globbed;
45866+
45867+ while (tmp) {
45868+ if (!glob_match(tmp->filename, *path))
45869+ return tmp;
45870+ tmp = tmp->next;
45871+ }
45872+
45873+ return NULL;
45874+}
45875+
45876+static struct acl_object_label *
45877+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
45878+ const ino_t curr_ino, const dev_t curr_dev,
45879+ const struct acl_subject_label *subj, char **path, const int checkglob)
45880+{
45881+ struct acl_subject_label *tmpsubj;
45882+ struct acl_object_label *retval;
45883+ struct acl_object_label *retval2;
45884+
45885+ tmpsubj = (struct acl_subject_label *) subj;
45886+ read_lock(&gr_inode_lock);
45887+ do {
45888+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
45889+ if (retval) {
45890+ if (checkglob && retval->globbed) {
45891+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
45892+ (struct vfsmount *)orig_mnt, path);
45893+ if (retval2)
45894+ retval = retval2;
45895+ }
45896+ break;
45897+ }
45898+ } while ((tmpsubj = tmpsubj->parent_subject));
45899+ read_unlock(&gr_inode_lock);
45900+
45901+ return retval;
45902+}
45903+
45904+static __inline__ struct acl_object_label *
45905+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
45906+ struct dentry *curr_dentry,
45907+ const struct acl_subject_label *subj, char **path, const int checkglob)
45908+{
45909+ int newglob = checkglob;
45910+ ino_t inode;
45911+ dev_t device;
45912+
45913+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
45914+ as we don't want a / * rule to match instead of the / object
45915+ don't do this for create lookups that call this function though, since they're looking up
45916+ on the parent and thus need globbing checks on all paths
45917+ */
45918+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
45919+ newglob = GR_NO_GLOB;
45920+
45921+ spin_lock(&curr_dentry->d_lock);
45922+ inode = curr_dentry->d_inode->i_ino;
45923+ device = __get_dev(curr_dentry);
45924+ spin_unlock(&curr_dentry->d_lock);
45925+
45926+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
45927+}
45928+
45929+static struct acl_object_label *
45930+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45931+ const struct acl_subject_label *subj, char *path, const int checkglob)
45932+{
45933+ struct dentry *dentry = (struct dentry *) l_dentry;
45934+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
45935+ struct acl_object_label *retval;
45936+ struct dentry *parent;
45937+
45938+ write_seqlock(&rename_lock);
45939+ br_read_lock(vfsmount_lock);
45940+
45941+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
45942+#ifdef CONFIG_NET
45943+ mnt == sock_mnt ||
45944+#endif
45945+#ifdef CONFIG_HUGETLBFS
45946+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
45947+#endif
45948+ /* ignore Eric Biederman */
45949+ IS_PRIVATE(l_dentry->d_inode))) {
45950+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
45951+ goto out;
45952+ }
45953+
45954+ for (;;) {
45955+ if (dentry == real_root.dentry && mnt == real_root.mnt)
45956+ break;
45957+
45958+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
45959+ if (mnt->mnt_parent == mnt)
45960+ break;
45961+
45962+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
45963+ if (retval != NULL)
45964+ goto out;
45965+
45966+ dentry = mnt->mnt_mountpoint;
45967+ mnt = mnt->mnt_parent;
45968+ continue;
45969+ }
45970+
45971+ parent = dentry->d_parent;
45972+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
45973+ if (retval != NULL)
45974+ goto out;
45975+
45976+ dentry = parent;
45977+ }
45978+
45979+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
45980+
45981+ /* real_root is pinned so we don't have to hold a reference */
45982+ if (retval == NULL)
45983+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
45984+out:
45985+ br_read_unlock(vfsmount_lock);
45986+ write_sequnlock(&rename_lock);
45987+
45988+ BUG_ON(retval == NULL);
45989+
45990+ return retval;
45991+}
45992+
45993+static __inline__ struct acl_object_label *
45994+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45995+ const struct acl_subject_label *subj)
45996+{
45997+ char *path = NULL;
45998+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
45999+}
46000+
46001+static __inline__ struct acl_object_label *
46002+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46003+ const struct acl_subject_label *subj)
46004+{
46005+ char *path = NULL;
46006+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
46007+}
46008+
46009+static __inline__ struct acl_object_label *
46010+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46011+ const struct acl_subject_label *subj, char *path)
46012+{
46013+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
46014+}
46015+
46016+static struct acl_subject_label *
46017+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46018+ const struct acl_role_label *role)
46019+{
46020+ struct dentry *dentry = (struct dentry *) l_dentry;
46021+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
46022+ struct acl_subject_label *retval;
46023+ struct dentry *parent;
46024+
46025+ write_seqlock(&rename_lock);
46026+ br_read_lock(vfsmount_lock);
46027+
46028+ for (;;) {
46029+ if (dentry == real_root.dentry && mnt == real_root.mnt)
46030+ break;
46031+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
46032+ if (mnt->mnt_parent == mnt)
46033+ break;
46034+
46035+ spin_lock(&dentry->d_lock);
46036+ read_lock(&gr_inode_lock);
46037+ retval =
46038+ lookup_acl_subj_label(dentry->d_inode->i_ino,
46039+ __get_dev(dentry), role);
46040+ read_unlock(&gr_inode_lock);
46041+ spin_unlock(&dentry->d_lock);
46042+ if (retval != NULL)
46043+ goto out;
46044+
46045+ dentry = mnt->mnt_mountpoint;
46046+ mnt = mnt->mnt_parent;
46047+ continue;
46048+ }
46049+
46050+ spin_lock(&dentry->d_lock);
46051+ read_lock(&gr_inode_lock);
46052+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
46053+ __get_dev(dentry), role);
46054+ read_unlock(&gr_inode_lock);
46055+ parent = dentry->d_parent;
46056+ spin_unlock(&dentry->d_lock);
46057+
46058+ if (retval != NULL)
46059+ goto out;
46060+
46061+ dentry = parent;
46062+ }
46063+
46064+ spin_lock(&dentry->d_lock);
46065+ read_lock(&gr_inode_lock);
46066+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
46067+ __get_dev(dentry), role);
46068+ read_unlock(&gr_inode_lock);
46069+ spin_unlock(&dentry->d_lock);
46070+
46071+ if (unlikely(retval == NULL)) {
46072+ /* real_root is pinned, we don't need to hold a reference */
46073+ read_lock(&gr_inode_lock);
46074+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
46075+ __get_dev(real_root.dentry), role);
46076+ read_unlock(&gr_inode_lock);
46077+ }
46078+out:
46079+ br_read_unlock(vfsmount_lock);
46080+ write_sequnlock(&rename_lock);
46081+
46082+ BUG_ON(retval == NULL);
46083+
46084+ return retval;
46085+}
46086+
46087+static void
46088+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
46089+{
46090+ struct task_struct *task = current;
46091+ const struct cred *cred = current_cred();
46092+
46093+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
46094+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
46095+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
46096+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
46097+
46098+ return;
46099+}
46100+
46101+static void
46102+gr_log_learn_sysctl(const char *path, const __u32 mode)
46103+{
46104+ struct task_struct *task = current;
46105+ const struct cred *cred = current_cred();
46106+
46107+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
46108+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
46109+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
46110+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
46111+
46112+ return;
46113+}
46114+
46115+static void
46116+gr_log_learn_id_change(const char type, const unsigned int real,
46117+ const unsigned int effective, const unsigned int fs)
46118+{
46119+ struct task_struct *task = current;
46120+ const struct cred *cred = current_cred();
46121+
46122+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
46123+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
46124+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
46125+ type, real, effective, fs, &task->signal->saved_ip);
46126+
46127+ return;
46128+}
46129+
46130+__u32
46131+gr_check_link(const struct dentry * new_dentry,
46132+ const struct dentry * parent_dentry,
46133+ const struct vfsmount * parent_mnt,
46134+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
46135+{
46136+ struct acl_object_label *obj;
46137+ __u32 oldmode, newmode;
46138+ __u32 needmode;
46139+
46140+ if (unlikely(!(gr_status & GR_READY)))
46141+ return (GR_CREATE | GR_LINK);
46142+
46143+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
46144+ oldmode = obj->mode;
46145+
46146+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46147+ oldmode |= (GR_CREATE | GR_LINK);
46148+
46149+ needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
46150+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
46151+ needmode |= GR_SETID | GR_AUDIT_SETID;
46152+
46153+ newmode =
46154+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
46155+ oldmode | needmode);
46156+
46157+ needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
46158+ GR_SETID | GR_READ | GR_FIND | GR_DELETE |
46159+ GR_INHERIT | GR_AUDIT_INHERIT);
46160+
46161+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
46162+ goto bad;
46163+
46164+ if ((oldmode & needmode) != needmode)
46165+ goto bad;
46166+
46167+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
46168+ if ((newmode & needmode) != needmode)
46169+ goto bad;
46170+
46171+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
46172+ return newmode;
46173+bad:
46174+ needmode = oldmode;
46175+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
46176+ needmode |= GR_SETID;
46177+
46178+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
46179+ gr_log_learn(old_dentry, old_mnt, needmode);
46180+ return (GR_CREATE | GR_LINK);
46181+ } else if (newmode & GR_SUPPRESS)
46182+ return GR_SUPPRESS;
46183+ else
46184+ return 0;
46185+}
46186+
46187+__u32
46188+gr_search_file(const struct dentry * dentry, const __u32 mode,
46189+ const struct vfsmount * mnt)
46190+{
46191+ __u32 retval = mode;
46192+ struct acl_subject_label *curracl;
46193+ struct acl_object_label *currobj;
46194+
46195+ if (unlikely(!(gr_status & GR_READY)))
46196+ return (mode & ~GR_AUDITS);
46197+
46198+ curracl = current->acl;
46199+
46200+ currobj = chk_obj_label(dentry, mnt, curracl);
46201+ retval = currobj->mode & mode;
46202+
46203+ /* if we're opening a specified transfer file for writing
46204+ (e.g. /dev/initctl), then transfer our role to init
46205+ */
46206+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
46207+ current->role->roletype & GR_ROLE_PERSIST)) {
46208+ struct task_struct *task = init_pid_ns.child_reaper;
46209+
46210+ if (task->role != current->role) {
46211+ task->acl_sp_role = 0;
46212+ task->acl_role_id = current->acl_role_id;
46213+ task->role = current->role;
46214+ rcu_read_lock();
46215+ read_lock(&grsec_exec_file_lock);
46216+ gr_apply_subject_to_task(task);
46217+ read_unlock(&grsec_exec_file_lock);
46218+ rcu_read_unlock();
46219+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
46220+ }
46221+ }
46222+
46223+ if (unlikely
46224+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
46225+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
46226+ __u32 new_mode = mode;
46227+
46228+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
46229+
46230+ retval = new_mode;
46231+
46232+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
46233+ new_mode |= GR_INHERIT;
46234+
46235+ if (!(mode & GR_NOLEARN))
46236+ gr_log_learn(dentry, mnt, new_mode);
46237+ }
46238+
46239+ return retval;
46240+}
46241+
46242+__u32
46243+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
46244+ const struct vfsmount * mnt, const __u32 mode)
46245+{
46246+ struct name_entry *match;
46247+ struct acl_object_label *matchpo;
46248+ struct acl_subject_label *curracl;
46249+ char *path;
46250+ __u32 retval;
46251+
46252+ if (unlikely(!(gr_status & GR_READY)))
46253+ return (mode & ~GR_AUDITS);
46254+
46255+ preempt_disable();
46256+ path = gr_to_filename_rbac(new_dentry, mnt);
46257+ match = lookup_name_entry_create(path);
46258+
46259+ if (!match)
46260+ goto check_parent;
46261+
46262+ curracl = current->acl;
46263+
46264+ read_lock(&gr_inode_lock);
46265+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
46266+ read_unlock(&gr_inode_lock);
46267+
46268+ if (matchpo) {
46269+ if ((matchpo->mode & mode) !=
46270+ (mode & ~(GR_AUDITS | GR_SUPPRESS))
46271+ && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
46272+ __u32 new_mode = mode;
46273+
46274+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
46275+
46276+ gr_log_learn(new_dentry, mnt, new_mode);
46277+
46278+ preempt_enable();
46279+ return new_mode;
46280+ }
46281+ preempt_enable();
46282+ return (matchpo->mode & mode);
46283+ }
46284+
46285+ check_parent:
46286+ curracl = current->acl;
46287+
46288+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
46289+ retval = matchpo->mode & mode;
46290+
46291+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
46292+ && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
46293+ __u32 new_mode = mode;
46294+
46295+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
46296+
46297+ gr_log_learn(new_dentry, mnt, new_mode);
46298+ preempt_enable();
46299+ return new_mode;
46300+ }
46301+
46302+ preempt_enable();
46303+ return retval;
46304+}
46305+
46306+int
46307+gr_check_hidden_task(const struct task_struct *task)
46308+{
46309+ if (unlikely(!(gr_status & GR_READY)))
46310+ return 0;
46311+
46312+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
46313+ return 1;
46314+
46315+ return 0;
46316+}
46317+
46318+int
46319+gr_check_protected_task(const struct task_struct *task)
46320+{
46321+ if (unlikely(!(gr_status & GR_READY) || !task))
46322+ return 0;
46323+
46324+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
46325+ task->acl != current->acl)
46326+ return 1;
46327+
46328+ return 0;
46329+}
46330+
46331+int
46332+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
46333+{
46334+ struct task_struct *p;
46335+ int ret = 0;
46336+
46337+ if (unlikely(!(gr_status & GR_READY) || !pid))
46338+ return ret;
46339+
46340+ read_lock(&tasklist_lock);
46341+ do_each_pid_task(pid, type, p) {
46342+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
46343+ p->acl != current->acl) {
46344+ ret = 1;
46345+ goto out;
46346+ }
46347+ } while_each_pid_task(pid, type, p);
46348+out:
46349+ read_unlock(&tasklist_lock);
46350+
46351+ return ret;
46352+}
46353+
46354+void
46355+gr_copy_label(struct task_struct *tsk)
46356+{
46357+ tsk->signal->used_accept = 0;
46358+ tsk->acl_sp_role = 0;
46359+ tsk->acl_role_id = current->acl_role_id;
46360+ tsk->acl = current->acl;
46361+ tsk->role = current->role;
46362+ tsk->signal->curr_ip = current->signal->curr_ip;
46363+ tsk->signal->saved_ip = current->signal->saved_ip;
46364+ if (current->exec_file)
46365+ get_file(current->exec_file);
46366+ tsk->exec_file = current->exec_file;
46367+ tsk->is_writable = current->is_writable;
46368+ if (unlikely(current->signal->used_accept)) {
46369+ current->signal->curr_ip = 0;
46370+ current->signal->saved_ip = 0;
46371+ }
46372+
46373+ return;
46374+}
46375+
46376+static void
46377+gr_set_proc_res(struct task_struct *task)
46378+{
46379+ struct acl_subject_label *proc;
46380+ unsigned short i;
46381+
46382+ proc = task->acl;
46383+
46384+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
46385+ return;
46386+
46387+ for (i = 0; i < RLIM_NLIMITS; i++) {
46388+ if (!(proc->resmask & (1 << i)))
46389+ continue;
46390+
46391+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
46392+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
46393+ }
46394+
46395+ return;
46396+}
46397+
46398+extern int __gr_process_user_ban(struct user_struct *user);
46399+
46400+int
46401+gr_check_user_change(int real, int effective, int fs)
46402+{
46403+ unsigned int i;
46404+ __u16 num;
46405+ uid_t *uidlist;
46406+ int curuid;
46407+ int realok = 0;
46408+ int effectiveok = 0;
46409+ int fsok = 0;
46410+
46411+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
46412+ struct user_struct *user;
46413+
46414+ if (real == -1)
46415+ goto skipit;
46416+
46417+ user = find_user(real);
46418+ if (user == NULL)
46419+ goto skipit;
46420+
46421+ if (__gr_process_user_ban(user)) {
46422+ /* for find_user */
46423+ free_uid(user);
46424+ return 1;
46425+ }
46426+
46427+ /* for find_user */
46428+ free_uid(user);
46429+
46430+skipit:
46431+#endif
46432+
46433+ if (unlikely(!(gr_status & GR_READY)))
46434+ return 0;
46435+
46436+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46437+ gr_log_learn_id_change('u', real, effective, fs);
46438+
46439+ num = current->acl->user_trans_num;
46440+ uidlist = current->acl->user_transitions;
46441+
46442+ if (uidlist == NULL)
46443+ return 0;
46444+
46445+ if (real == -1)
46446+ realok = 1;
46447+ if (effective == -1)
46448+ effectiveok = 1;
46449+ if (fs == -1)
46450+ fsok = 1;
46451+
46452+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
46453+ for (i = 0; i < num; i++) {
46454+ curuid = (int)uidlist[i];
46455+ if (real == curuid)
46456+ realok = 1;
46457+ if (effective == curuid)
46458+ effectiveok = 1;
46459+ if (fs == curuid)
46460+ fsok = 1;
46461+ }
46462+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
46463+ for (i = 0; i < num; i++) {
46464+ curuid = (int)uidlist[i];
46465+ if (real == curuid)
46466+ break;
46467+ if (effective == curuid)
46468+ break;
46469+ if (fs == curuid)
46470+ break;
46471+ }
46472+ /* not in deny list */
46473+ if (i == num) {
46474+ realok = 1;
46475+ effectiveok = 1;
46476+ fsok = 1;
46477+ }
46478+ }
46479+
46480+ if (realok && effectiveok && fsok)
46481+ return 0;
46482+ else {
46483+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
46484+ return 1;
46485+ }
46486+}
46487+
46488+int
46489+gr_check_group_change(int real, int effective, int fs)
46490+{
46491+ unsigned int i;
46492+ __u16 num;
46493+ gid_t *gidlist;
46494+ int curgid;
46495+ int realok = 0;
46496+ int effectiveok = 0;
46497+ int fsok = 0;
46498+
46499+ if (unlikely(!(gr_status & GR_READY)))
46500+ return 0;
46501+
46502+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46503+ gr_log_learn_id_change('g', real, effective, fs);
46504+
46505+ num = current->acl->group_trans_num;
46506+ gidlist = current->acl->group_transitions;
46507+
46508+ if (gidlist == NULL)
46509+ return 0;
46510+
46511+ if (real == -1)
46512+ realok = 1;
46513+ if (effective == -1)
46514+ effectiveok = 1;
46515+ if (fs == -1)
46516+ fsok = 1;
46517+
46518+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
46519+ for (i = 0; i < num; i++) {
46520+ curgid = (int)gidlist[i];
46521+ if (real == curgid)
46522+ realok = 1;
46523+ if (effective == curgid)
46524+ effectiveok = 1;
46525+ if (fs == curgid)
46526+ fsok = 1;
46527+ }
46528+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
46529+ for (i = 0; i < num; i++) {
46530+ curgid = (int)gidlist[i];
46531+ if (real == curgid)
46532+ break;
46533+ if (effective == curgid)
46534+ break;
46535+ if (fs == curgid)
46536+ break;
46537+ }
46538+ /* not in deny list */
46539+ if (i == num) {
46540+ realok = 1;
46541+ effectiveok = 1;
46542+ fsok = 1;
46543+ }
46544+ }
46545+
46546+ if (realok && effectiveok && fsok)
46547+ return 0;
46548+ else {
46549+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
46550+ return 1;
46551+ }
46552+}
46553+
46554+void
46555+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
46556+{
46557+ struct acl_role_label *role = task->role;
46558+ struct acl_subject_label *subj = NULL;
46559+ struct acl_object_label *obj;
46560+ struct file *filp;
46561+
46562+ if (unlikely(!(gr_status & GR_READY)))
46563+ return;
46564+
46565+ filp = task->exec_file;
46566+
46567+ /* kernel process, we'll give them the kernel role */
46568+ if (unlikely(!filp)) {
46569+ task->role = kernel_role;
46570+ task->acl = kernel_role->root_label;
46571+ return;
46572+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
46573+ role = lookup_acl_role_label(task, uid, gid);
46574+
46575+ /* perform subject lookup in possibly new role
46576+ we can use this result below in the case where role == task->role
46577+ */
46578+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
46579+
46580+ /* if we changed uid/gid, but result in the same role
46581+ and are using inheritance, don't lose the inherited subject
46582+ if current subject is other than what normal lookup
46583+ would result in, we arrived via inheritance, don't
46584+ lose subject
46585+ */
46586+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
46587+ (subj == task->acl)))
46588+ task->acl = subj;
46589+
46590+ task->role = role;
46591+
46592+ task->is_writable = 0;
46593+
46594+ /* ignore additional mmap checks for processes that are writable
46595+ by the default ACL */
46596+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
46597+ if (unlikely(obj->mode & GR_WRITE))
46598+ task->is_writable = 1;
46599+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
46600+ if (unlikely(obj->mode & GR_WRITE))
46601+ task->is_writable = 1;
46602+
46603+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46604+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
46605+#endif
46606+
46607+ gr_set_proc_res(task);
46608+
46609+ return;
46610+}
46611+
46612+int
46613+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
46614+ const int unsafe_share)
46615+{
46616+ struct task_struct *task = current;
46617+ struct acl_subject_label *newacl;
46618+ struct acl_object_label *obj;
46619+ __u32 retmode;
46620+
46621+ if (unlikely(!(gr_status & GR_READY)))
46622+ return 0;
46623+
46624+ newacl = chk_subj_label(dentry, mnt, task->role);
46625+
46626+ task_lock(task);
46627+ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
46628+ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
46629+ !(task->role->roletype & GR_ROLE_GOD) &&
46630+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
46631+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
46632+ task_unlock(task);
46633+ if (unsafe_share)
46634+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
46635+ else
46636+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
46637+ return -EACCES;
46638+ }
46639+ task_unlock(task);
46640+
46641+ obj = chk_obj_label(dentry, mnt, task->acl);
46642+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
46643+
46644+ if (!(task->acl->mode & GR_INHERITLEARN) &&
46645+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
46646+ if (obj->nested)
46647+ task->acl = obj->nested;
46648+ else
46649+ task->acl = newacl;
46650+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
46651+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
46652+
46653+ task->is_writable = 0;
46654+
46655+ /* ignore additional mmap checks for processes that are writable
46656+ by the default ACL */
46657+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
46658+ if (unlikely(obj->mode & GR_WRITE))
46659+ task->is_writable = 1;
46660+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
46661+ if (unlikely(obj->mode & GR_WRITE))
46662+ task->is_writable = 1;
46663+
46664+ gr_set_proc_res(task);
46665+
46666+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46667+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
46668+#endif
46669+ return 0;
46670+}
46671+
46672+/* always called with valid inodev ptr */
46673+static void
46674+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
46675+{
46676+ struct acl_object_label *matchpo;
46677+ struct acl_subject_label *matchps;
46678+ struct acl_subject_label *subj;
46679+ struct acl_role_label *role;
46680+ unsigned int x;
46681+
46682+ FOR_EACH_ROLE_START(role)
46683+ FOR_EACH_SUBJECT_START(role, subj, x)
46684+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
46685+ matchpo->mode |= GR_DELETED;
46686+ FOR_EACH_SUBJECT_END(subj,x)
46687+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
46688+ if (subj->inode == ino && subj->device == dev)
46689+ subj->mode |= GR_DELETED;
46690+ FOR_EACH_NESTED_SUBJECT_END(subj)
46691+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
46692+ matchps->mode |= GR_DELETED;
46693+ FOR_EACH_ROLE_END(role)
46694+
46695+ inodev->nentry->deleted = 1;
46696+
46697+ return;
46698+}
46699+
46700+void
46701+gr_handle_delete(const ino_t ino, const dev_t dev)
46702+{
46703+ struct inodev_entry *inodev;
46704+
46705+ if (unlikely(!(gr_status & GR_READY)))
46706+ return;
46707+
46708+ write_lock(&gr_inode_lock);
46709+ inodev = lookup_inodev_entry(ino, dev);
46710+ if (inodev != NULL)
46711+ do_handle_delete(inodev, ino, dev);
46712+ write_unlock(&gr_inode_lock);
46713+
46714+ return;
46715+}
46716+
46717+static void
46718+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
46719+ const ino_t newinode, const dev_t newdevice,
46720+ struct acl_subject_label *subj)
46721+{
46722+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
46723+ struct acl_object_label *match;
46724+
46725+ match = subj->obj_hash[index];
46726+
46727+ while (match && (match->inode != oldinode ||
46728+ match->device != olddevice ||
46729+ !(match->mode & GR_DELETED)))
46730+ match = match->next;
46731+
46732+ if (match && (match->inode == oldinode)
46733+ && (match->device == olddevice)
46734+ && (match->mode & GR_DELETED)) {
46735+ if (match->prev == NULL) {
46736+ subj->obj_hash[index] = match->next;
46737+ if (match->next != NULL)
46738+ match->next->prev = NULL;
46739+ } else {
46740+ match->prev->next = match->next;
46741+ if (match->next != NULL)
46742+ match->next->prev = match->prev;
46743+ }
46744+ match->prev = NULL;
46745+ match->next = NULL;
46746+ match->inode = newinode;
46747+ match->device = newdevice;
46748+ match->mode &= ~GR_DELETED;
46749+
46750+ insert_acl_obj_label(match, subj);
46751+ }
46752+
46753+ return;
46754+}
46755+
46756+static void
46757+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
46758+ const ino_t newinode, const dev_t newdevice,
46759+ struct acl_role_label *role)
46760+{
46761+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
46762+ struct acl_subject_label *match;
46763+
46764+ match = role->subj_hash[index];
46765+
46766+ while (match && (match->inode != oldinode ||
46767+ match->device != olddevice ||
46768+ !(match->mode & GR_DELETED)))
46769+ match = match->next;
46770+
46771+ if (match && (match->inode == oldinode)
46772+ && (match->device == olddevice)
46773+ && (match->mode & GR_DELETED)) {
46774+ if (match->prev == NULL) {
46775+ role->subj_hash[index] = match->next;
46776+ if (match->next != NULL)
46777+ match->next->prev = NULL;
46778+ } else {
46779+ match->prev->next = match->next;
46780+ if (match->next != NULL)
46781+ match->next->prev = match->prev;
46782+ }
46783+ match->prev = NULL;
46784+ match->next = NULL;
46785+ match->inode = newinode;
46786+ match->device = newdevice;
46787+ match->mode &= ~GR_DELETED;
46788+
46789+ insert_acl_subj_label(match, role);
46790+ }
46791+
46792+ return;
46793+}
46794+
46795+static void
46796+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
46797+ const ino_t newinode, const dev_t newdevice)
46798+{
46799+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
46800+ struct inodev_entry *match;
46801+
46802+ match = inodev_set.i_hash[index];
46803+
46804+ while (match && (match->nentry->inode != oldinode ||
46805+ match->nentry->device != olddevice || !match->nentry->deleted))
46806+ match = match->next;
46807+
46808+ if (match && (match->nentry->inode == oldinode)
46809+ && (match->nentry->device == olddevice) &&
46810+ match->nentry->deleted) {
46811+ if (match->prev == NULL) {
46812+ inodev_set.i_hash[index] = match->next;
46813+ if (match->next != NULL)
46814+ match->next->prev = NULL;
46815+ } else {
46816+ match->prev->next = match->next;
46817+ if (match->next != NULL)
46818+ match->next->prev = match->prev;
46819+ }
46820+ match->prev = NULL;
46821+ match->next = NULL;
46822+ match->nentry->inode = newinode;
46823+ match->nentry->device = newdevice;
46824+ match->nentry->deleted = 0;
46825+
46826+ insert_inodev_entry(match);
46827+ }
46828+
46829+ return;
46830+}
46831+
46832+static void
46833+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
46834+ const struct vfsmount *mnt)
46835+{
46836+ struct acl_subject_label *subj;
46837+ struct acl_role_label *role;
46838+ unsigned int x;
46839+ ino_t ino = dentry->d_inode->i_ino;
46840+ dev_t dev = __get_dev(dentry);
46841+
46842+ FOR_EACH_ROLE_START(role)
46843+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
46844+
46845+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
46846+ if ((subj->inode == ino) && (subj->device == dev)) {
46847+ subj->inode = ino;
46848+ subj->device = dev;
46849+ }
46850+ FOR_EACH_NESTED_SUBJECT_END(subj)
46851+ FOR_EACH_SUBJECT_START(role, subj, x)
46852+ update_acl_obj_label(matchn->inode, matchn->device,
46853+ ino, dev, subj);
46854+ FOR_EACH_SUBJECT_END(subj,x)
46855+ FOR_EACH_ROLE_END(role)
46856+
46857+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
46858+
46859+ return;
46860+}
46861+
46862+void
46863+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
46864+{
46865+ struct name_entry *matchn;
46866+
46867+ if (unlikely(!(gr_status & GR_READY)))
46868+ return;
46869+
46870+ preempt_disable();
46871+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
46872+
46873+ if (unlikely((unsigned long)matchn)) {
46874+ write_lock(&gr_inode_lock);
46875+ do_handle_create(matchn, dentry, mnt);
46876+ write_unlock(&gr_inode_lock);
46877+ }
46878+ preempt_enable();
46879+
46880+ return;
46881+}
46882+
46883+void
46884+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
46885+ struct dentry *old_dentry,
46886+ struct dentry *new_dentry,
46887+ struct vfsmount *mnt, const __u8 replace)
46888+{
46889+ struct name_entry *matchn;
46890+ struct inodev_entry *inodev;
46891+ ino_t old_ino = old_dentry->d_inode->i_ino;
46892+ dev_t old_dev = __get_dev(old_dentry);
46893+
46894+ /* vfs_rename swaps the name and parent link for old_dentry and
46895+ new_dentry
46896+ at this point, old_dentry has the new name, parent link, and inode
46897+ for the renamed file
46898+ if a file is being replaced by a rename, new_dentry has the inode
46899+ and name for the replaced file
46900+ */
46901+
46902+ if (unlikely(!(gr_status & GR_READY)))
46903+ return;
46904+
46905+ preempt_disable();
46906+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
46907+
46908+ /* we wouldn't have to check d_inode if it weren't for
46909+ NFS silly-renaming
46910+ */
46911+
46912+ write_lock(&gr_inode_lock);
46913+ if (unlikely(replace && new_dentry->d_inode)) {
46914+ ino_t new_ino = new_dentry->d_inode->i_ino;
46915+ dev_t new_dev = __get_dev(new_dentry);
46916+
46917+ inodev = lookup_inodev_entry(new_ino, new_dev);
46918+ if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
46919+ do_handle_delete(inodev, new_ino, new_dev);
46920+ }
46921+
46922+ inodev = lookup_inodev_entry(old_ino, old_dev);
46923+ if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
46924+ do_handle_delete(inodev, old_ino, old_dev);
46925+
46926+ if (unlikely((unsigned long)matchn))
46927+ do_handle_create(matchn, old_dentry, mnt);
46928+
46929+ write_unlock(&gr_inode_lock);
46930+ preempt_enable();
46931+
46932+ return;
46933+}
46934+
46935+static int
46936+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
46937+ unsigned char **sum)
46938+{
46939+ struct acl_role_label *r;
46940+ struct role_allowed_ip *ipp;
46941+ struct role_transition *trans;
46942+ unsigned int i;
46943+ int found = 0;
46944+ u32 curr_ip = current->signal->curr_ip;
46945+
46946+ current->signal->saved_ip = curr_ip;
46947+
46948+ /* check transition table */
46949+
46950+ for (trans = current->role->transitions; trans; trans = trans->next) {
46951+ if (!strcmp(rolename, trans->rolename)) {
46952+ found = 1;
46953+ break;
46954+ }
46955+ }
46956+
46957+ if (!found)
46958+ return 0;
46959+
46960+ /* handle special roles that do not require authentication
46961+ and check ip */
46962+
46963+ FOR_EACH_ROLE_START(r)
46964+ if (!strcmp(rolename, r->rolename) &&
46965+ (r->roletype & GR_ROLE_SPECIAL)) {
46966+ found = 0;
46967+ if (r->allowed_ips != NULL) {
46968+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
46969+ if ((ntohl(curr_ip) & ipp->netmask) ==
46970+ (ntohl(ipp->addr) & ipp->netmask))
46971+ found = 1;
46972+ }
46973+ } else
46974+ found = 2;
46975+ if (!found)
46976+ return 0;
46977+
46978+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
46979+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
46980+ *salt = NULL;
46981+ *sum = NULL;
46982+ return 1;
46983+ }
46984+ }
46985+ FOR_EACH_ROLE_END(r)
46986+
46987+ for (i = 0; i < num_sprole_pws; i++) {
46988+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
46989+ *salt = acl_special_roles[i]->salt;
46990+ *sum = acl_special_roles[i]->sum;
46991+ return 1;
46992+ }
46993+ }
46994+
46995+ return 0;
46996+}
46997+
46998+static void
46999+assign_special_role(char *rolename)
47000+{
47001+ struct acl_object_label *obj;
47002+ struct acl_role_label *r;
47003+ struct acl_role_label *assigned = NULL;
47004+ struct task_struct *tsk;
47005+ struct file *filp;
47006+
47007+ FOR_EACH_ROLE_START(r)
47008+ if (!strcmp(rolename, r->rolename) &&
47009+ (r->roletype & GR_ROLE_SPECIAL)) {
47010+ assigned = r;
47011+ break;
47012+ }
47013+ FOR_EACH_ROLE_END(r)
47014+
47015+ if (!assigned)
47016+ return;
47017+
47018+ read_lock(&tasklist_lock);
47019+ read_lock(&grsec_exec_file_lock);
47020+
47021+ tsk = current->real_parent;
47022+ if (tsk == NULL)
47023+ goto out_unlock;
47024+
47025+ filp = tsk->exec_file;
47026+ if (filp == NULL)
47027+ goto out_unlock;
47028+
47029+ tsk->is_writable = 0;
47030+
47031+ tsk->acl_sp_role = 1;
47032+ tsk->acl_role_id = ++acl_sp_role_value;
47033+ tsk->role = assigned;
47034+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
47035+
47036+ /* ignore additional mmap checks for processes that are writable
47037+ by the default ACL */
47038+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47039+ if (unlikely(obj->mode & GR_WRITE))
47040+ tsk->is_writable = 1;
47041+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
47042+ if (unlikely(obj->mode & GR_WRITE))
47043+ tsk->is_writable = 1;
47044+
47045+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47046+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
47047+#endif
47048+
47049+out_unlock:
47050+ read_unlock(&grsec_exec_file_lock);
47051+ read_unlock(&tasklist_lock);
47052+ return;
47053+}
47054+
47055+int gr_check_secure_terminal(struct task_struct *task)
47056+{
47057+ struct task_struct *p, *p2, *p3;
47058+ struct files_struct *files;
47059+ struct fdtable *fdt;
47060+ struct file *our_file = NULL, *file;
47061+ int i;
47062+
47063+ if (task->signal->tty == NULL)
47064+ return 1;
47065+
47066+ files = get_files_struct(task);
47067+ if (files != NULL) {
47068+ rcu_read_lock();
47069+ fdt = files_fdtable(files);
47070+ for (i=0; i < fdt->max_fds; i++) {
47071+ file = fcheck_files(files, i);
47072+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
47073+ get_file(file);
47074+ our_file = file;
47075+ }
47076+ }
47077+ rcu_read_unlock();
47078+ put_files_struct(files);
47079+ }
47080+
47081+ if (our_file == NULL)
47082+ return 1;
47083+
47084+ read_lock(&tasklist_lock);
47085+ do_each_thread(p2, p) {
47086+ files = get_files_struct(p);
47087+ if (files == NULL ||
47088+ (p->signal && p->signal->tty == task->signal->tty)) {
47089+ if (files != NULL)
47090+ put_files_struct(files);
47091+ continue;
47092+ }
47093+ rcu_read_lock();
47094+ fdt = files_fdtable(files);
47095+ for (i=0; i < fdt->max_fds; i++) {
47096+ file = fcheck_files(files, i);
47097+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
47098+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
47099+ p3 = task;
47100+ while (p3->pid > 0) {
47101+ if (p3 == p)
47102+ break;
47103+ p3 = p3->real_parent;
47104+ }
47105+ if (p3 == p)
47106+ break;
47107+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
47108+ gr_handle_alertkill(p);
47109+ rcu_read_unlock();
47110+ put_files_struct(files);
47111+ read_unlock(&tasklist_lock);
47112+ fput(our_file);
47113+ return 0;
47114+ }
47115+ }
47116+ rcu_read_unlock();
47117+ put_files_struct(files);
47118+ } while_each_thread(p2, p);
47119+ read_unlock(&tasklist_lock);
47120+
47121+ fput(our_file);
47122+ return 1;
47123+}
47124+
47125+ssize_t
47126+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
47127+{
47128+ struct gr_arg_wrapper uwrap;
47129+ unsigned char *sprole_salt = NULL;
47130+ unsigned char *sprole_sum = NULL;
47131+ int error = sizeof (struct gr_arg_wrapper);
47132+ int error2 = 0;
47133+
47134+ mutex_lock(&gr_dev_mutex);
47135+
47136+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
47137+ error = -EPERM;
47138+ goto out;
47139+ }
47140+
47141+ if (count != sizeof (struct gr_arg_wrapper)) {
47142+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
47143+ error = -EINVAL;
47144+ goto out;
47145+ }
47146+
47147+
47148+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
47149+ gr_auth_expires = 0;
47150+ gr_auth_attempts = 0;
47151+ }
47152+
47153+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
47154+ error = -EFAULT;
47155+ goto out;
47156+ }
47157+
47158+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
47159+ error = -EINVAL;
47160+ goto out;
47161+ }
47162+
47163+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
47164+ error = -EFAULT;
47165+ goto out;
47166+ }
47167+
47168+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
47169+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
47170+ time_after(gr_auth_expires, get_seconds())) {
47171+ error = -EBUSY;
47172+ goto out;
47173+ }
47174+
47175+ /* if non-root trying to do anything other than use a special role,
47176+ do not attempt authentication, do not count towards authentication
47177+ locking
47178+ */
47179+
47180+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
47181+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
47182+ current_uid()) {
47183+ error = -EPERM;
47184+ goto out;
47185+ }
47186+
47187+ /* ensure pw and special role name are null terminated */
47188+
47189+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
47190+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
47191+
47192+ /* Okay.
47193+ * We have our enough of the argument structure..(we have yet
47194+ * to copy_from_user the tables themselves) . Copy the tables
47195+ * only if we need them, i.e. for loading operations. */
47196+
47197+ switch (gr_usermode->mode) {
47198+ case GR_STATUS:
47199+ if (gr_status & GR_READY) {
47200+ error = 1;
47201+ if (!gr_check_secure_terminal(current))
47202+ error = 3;
47203+ } else
47204+ error = 2;
47205+ goto out;
47206+ case GR_SHUTDOWN:
47207+ if ((gr_status & GR_READY)
47208+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
47209+ pax_open_kernel();
47210+ gr_status &= ~GR_READY;
47211+ pax_close_kernel();
47212+
47213+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
47214+ free_variables();
47215+ memset(gr_usermode, 0, sizeof (struct gr_arg));
47216+ memset(gr_system_salt, 0, GR_SALT_LEN);
47217+ memset(gr_system_sum, 0, GR_SHA_LEN);
47218+ } else if (gr_status & GR_READY) {
47219+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
47220+ error = -EPERM;
47221+ } else {
47222+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
47223+ error = -EAGAIN;
47224+ }
47225+ break;
47226+ case GR_ENABLE:
47227+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
47228+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
47229+ else {
47230+ if (gr_status & GR_READY)
47231+ error = -EAGAIN;
47232+ else
47233+ error = error2;
47234+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
47235+ }
47236+ break;
47237+ case GR_RELOAD:
47238+ if (!(gr_status & GR_READY)) {
47239+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
47240+ error = -EAGAIN;
47241+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
47242+ preempt_disable();
47243+
47244+ pax_open_kernel();
47245+ gr_status &= ~GR_READY;
47246+ pax_close_kernel();
47247+
47248+ free_variables();
47249+ if (!(error2 = gracl_init(gr_usermode))) {
47250+ preempt_enable();
47251+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
47252+ } else {
47253+ preempt_enable();
47254+ error = error2;
47255+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
47256+ }
47257+ } else {
47258+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
47259+ error = -EPERM;
47260+ }
47261+ break;
47262+ case GR_SEGVMOD:
47263+ if (unlikely(!(gr_status & GR_READY))) {
47264+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
47265+ error = -EAGAIN;
47266+ break;
47267+ }
47268+
47269+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
47270+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
47271+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
47272+ struct acl_subject_label *segvacl;
47273+ segvacl =
47274+ lookup_acl_subj_label(gr_usermode->segv_inode,
47275+ gr_usermode->segv_device,
47276+ current->role);
47277+ if (segvacl) {
47278+ segvacl->crashes = 0;
47279+ segvacl->expires = 0;
47280+ }
47281+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
47282+ gr_remove_uid(gr_usermode->segv_uid);
47283+ }
47284+ } else {
47285+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
47286+ error = -EPERM;
47287+ }
47288+ break;
47289+ case GR_SPROLE:
47290+ case GR_SPROLEPAM:
47291+ if (unlikely(!(gr_status & GR_READY))) {
47292+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
47293+ error = -EAGAIN;
47294+ break;
47295+ }
47296+
47297+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
47298+ current->role->expires = 0;
47299+ current->role->auth_attempts = 0;
47300+ }
47301+
47302+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
47303+ time_after(current->role->expires, get_seconds())) {
47304+ error = -EBUSY;
47305+ goto out;
47306+ }
47307+
47308+ if (lookup_special_role_auth
47309+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
47310+ && ((!sprole_salt && !sprole_sum)
47311+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
47312+ char *p = "";
47313+ assign_special_role(gr_usermode->sp_role);
47314+ read_lock(&tasklist_lock);
47315+ if (current->real_parent)
47316+ p = current->real_parent->role->rolename;
47317+ read_unlock(&tasklist_lock);
47318+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
47319+ p, acl_sp_role_value);
47320+ } else {
47321+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
47322+ error = -EPERM;
47323+ if(!(current->role->auth_attempts++))
47324+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
47325+
47326+ goto out;
47327+ }
47328+ break;
47329+ case GR_UNSPROLE:
47330+ if (unlikely(!(gr_status & GR_READY))) {
47331+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
47332+ error = -EAGAIN;
47333+ break;
47334+ }
47335+
47336+ if (current->role->roletype & GR_ROLE_SPECIAL) {
47337+ char *p = "";
47338+ int i = 0;
47339+
47340+ read_lock(&tasklist_lock);
47341+ if (current->real_parent) {
47342+ p = current->real_parent->role->rolename;
47343+ i = current->real_parent->acl_role_id;
47344+ }
47345+ read_unlock(&tasklist_lock);
47346+
47347+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
47348+ gr_set_acls(1);
47349+ } else {
47350+ error = -EPERM;
47351+ goto out;
47352+ }
47353+ break;
47354+ default:
47355+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
47356+ error = -EINVAL;
47357+ break;
47358+ }
47359+
47360+ if (error != -EPERM)
47361+ goto out;
47362+
47363+ if(!(gr_auth_attempts++))
47364+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
47365+
47366+ out:
47367+ mutex_unlock(&gr_dev_mutex);
47368+ return error;
47369+}
47370+
47371+/* must be called with
47372+ rcu_read_lock();
47373+ read_lock(&tasklist_lock);
47374+ read_lock(&grsec_exec_file_lock);
47375+*/
47376+int gr_apply_subject_to_task(struct task_struct *task)
47377+{
47378+ struct acl_object_label *obj;
47379+ char *tmpname;
47380+ struct acl_subject_label *tmpsubj;
47381+ struct file *filp;
47382+ struct name_entry *nmatch;
47383+
47384+ filp = task->exec_file;
47385+ if (filp == NULL)
47386+ return 0;
47387+
47388+ /* the following is to apply the correct subject
47389+ on binaries running when the RBAC system
47390+ is enabled, when the binaries have been
47391+ replaced or deleted since their execution
47392+ -----
47393+ when the RBAC system starts, the inode/dev
47394+ from exec_file will be one the RBAC system
47395+ is unaware of. It only knows the inode/dev
47396+ of the present file on disk, or the absence
47397+ of it.
47398+ */
47399+ preempt_disable();
47400+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
47401+
47402+ nmatch = lookup_name_entry(tmpname);
47403+ preempt_enable();
47404+ tmpsubj = NULL;
47405+ if (nmatch) {
47406+ if (nmatch->deleted)
47407+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
47408+ else
47409+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
47410+ if (tmpsubj != NULL)
47411+ task->acl = tmpsubj;
47412+ }
47413+ if (tmpsubj == NULL)
47414+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
47415+ task->role);
47416+ if (task->acl) {
47417+ task->is_writable = 0;
47418+ /* ignore additional mmap checks for processes that are writable
47419+ by the default ACL */
47420+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47421+ if (unlikely(obj->mode & GR_WRITE))
47422+ task->is_writable = 1;
47423+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
47424+ if (unlikely(obj->mode & GR_WRITE))
47425+ task->is_writable = 1;
47426+
47427+ gr_set_proc_res(task);
47428+
47429+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47430+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
47431+#endif
47432+ } else {
47433+ return 1;
47434+ }
47435+
47436+ return 0;
47437+}
47438+
47439+int
47440+gr_set_acls(const int type)
47441+{
47442+ struct task_struct *task, *task2;
47443+ struct acl_role_label *role = current->role;
47444+ __u16 acl_role_id = current->acl_role_id;
47445+ const struct cred *cred;
47446+ int ret;
47447+
47448+ rcu_read_lock();
47449+ read_lock(&tasklist_lock);
47450+ read_lock(&grsec_exec_file_lock);
47451+ do_each_thread(task2, task) {
47452+ /* check to see if we're called from the exit handler,
47453+ if so, only replace ACLs that have inherited the admin
47454+ ACL */
47455+
47456+ if (type && (task->role != role ||
47457+ task->acl_role_id != acl_role_id))
47458+ continue;
47459+
47460+ task->acl_role_id = 0;
47461+ task->acl_sp_role = 0;
47462+
47463+ if (task->exec_file) {
47464+ cred = __task_cred(task);
47465+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
47466+ ret = gr_apply_subject_to_task(task);
47467+ if (ret) {
47468+ read_unlock(&grsec_exec_file_lock);
47469+ read_unlock(&tasklist_lock);
47470+ rcu_read_unlock();
47471+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
47472+ return ret;
47473+ }
47474+ } else {
47475+ // it's a kernel process
47476+ task->role = kernel_role;
47477+ task->acl = kernel_role->root_label;
47478+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
47479+ task->acl->mode &= ~GR_PROCFIND;
47480+#endif
47481+ }
47482+ } while_each_thread(task2, task);
47483+ read_unlock(&grsec_exec_file_lock);
47484+ read_unlock(&tasklist_lock);
47485+ rcu_read_unlock();
47486+
47487+ return 0;
47488+}
47489+
47490+void
47491+gr_learn_resource(const struct task_struct *task,
47492+ const int res, const unsigned long wanted, const int gt)
47493+{
47494+ struct acl_subject_label *acl;
47495+ const struct cred *cred;
47496+
47497+ if (unlikely((gr_status & GR_READY) &&
47498+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
47499+ goto skip_reslog;
47500+
47501+#ifdef CONFIG_GRKERNSEC_RESLOG
47502+ gr_log_resource(task, res, wanted, gt);
47503+#endif
47504+ skip_reslog:
47505+
47506+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
47507+ return;
47508+
47509+ acl = task->acl;
47510+
47511+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
47512+ !(acl->resmask & (1 << (unsigned short) res))))
47513+ return;
47514+
47515+ if (wanted >= acl->res[res].rlim_cur) {
47516+ unsigned long res_add;
47517+
47518+ res_add = wanted;
47519+ switch (res) {
47520+ case RLIMIT_CPU:
47521+ res_add += GR_RLIM_CPU_BUMP;
47522+ break;
47523+ case RLIMIT_FSIZE:
47524+ res_add += GR_RLIM_FSIZE_BUMP;
47525+ break;
47526+ case RLIMIT_DATA:
47527+ res_add += GR_RLIM_DATA_BUMP;
47528+ break;
47529+ case RLIMIT_STACK:
47530+ res_add += GR_RLIM_STACK_BUMP;
47531+ break;
47532+ case RLIMIT_CORE:
47533+ res_add += GR_RLIM_CORE_BUMP;
47534+ break;
47535+ case RLIMIT_RSS:
47536+ res_add += GR_RLIM_RSS_BUMP;
47537+ break;
47538+ case RLIMIT_NPROC:
47539+ res_add += GR_RLIM_NPROC_BUMP;
47540+ break;
47541+ case RLIMIT_NOFILE:
47542+ res_add += GR_RLIM_NOFILE_BUMP;
47543+ break;
47544+ case RLIMIT_MEMLOCK:
47545+ res_add += GR_RLIM_MEMLOCK_BUMP;
47546+ break;
47547+ case RLIMIT_AS:
47548+ res_add += GR_RLIM_AS_BUMP;
47549+ break;
47550+ case RLIMIT_LOCKS:
47551+ res_add += GR_RLIM_LOCKS_BUMP;
47552+ break;
47553+ case RLIMIT_SIGPENDING:
47554+ res_add += GR_RLIM_SIGPENDING_BUMP;
47555+ break;
47556+ case RLIMIT_MSGQUEUE:
47557+ res_add += GR_RLIM_MSGQUEUE_BUMP;
47558+ break;
47559+ case RLIMIT_NICE:
47560+ res_add += GR_RLIM_NICE_BUMP;
47561+ break;
47562+ case RLIMIT_RTPRIO:
47563+ res_add += GR_RLIM_RTPRIO_BUMP;
47564+ break;
47565+ case RLIMIT_RTTIME:
47566+ res_add += GR_RLIM_RTTIME_BUMP;
47567+ break;
47568+ }
47569+
47570+ acl->res[res].rlim_cur = res_add;
47571+
47572+ if (wanted > acl->res[res].rlim_max)
47573+ acl->res[res].rlim_max = res_add;
47574+
47575+ /* only log the subject filename, since resource logging is supported for
47576+ single-subject learning only */
47577+ rcu_read_lock();
47578+ cred = __task_cred(task);
47579+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
47580+ task->role->roletype, cred->uid, cred->gid, acl->filename,
47581+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
47582+ "", (unsigned long) res, &task->signal->saved_ip);
47583+ rcu_read_unlock();
47584+ }
47585+
47586+ return;
47587+}
47588+
47589+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
47590+void
47591+pax_set_initial_flags(struct linux_binprm *bprm)
47592+{
47593+ struct task_struct *task = current;
47594+ struct acl_subject_label *proc;
47595+ unsigned long flags;
47596+
47597+ if (unlikely(!(gr_status & GR_READY)))
47598+ return;
47599+
47600+ flags = pax_get_flags(task);
47601+
47602+ proc = task->acl;
47603+
47604+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
47605+ flags &= ~MF_PAX_PAGEEXEC;
47606+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
47607+ flags &= ~MF_PAX_SEGMEXEC;
47608+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
47609+ flags &= ~MF_PAX_RANDMMAP;
47610+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
47611+ flags &= ~MF_PAX_EMUTRAMP;
47612+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
47613+ flags &= ~MF_PAX_MPROTECT;
47614+
47615+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
47616+ flags |= MF_PAX_PAGEEXEC;
47617+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
47618+ flags |= MF_PAX_SEGMEXEC;
47619+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
47620+ flags |= MF_PAX_RANDMMAP;
47621+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
47622+ flags |= MF_PAX_EMUTRAMP;
47623+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
47624+ flags |= MF_PAX_MPROTECT;
47625+
47626+ pax_set_flags(task, flags);
47627+
47628+ return;
47629+}
47630+#endif
47631+
47632+#ifdef CONFIG_SYSCTL
47633+/* Eric Biederman likes breaking userland ABI and every inode-based security
47634+ system to save 35kb of memory */
47635+
47636+/* we modify the passed in filename, but adjust it back before returning */
47637+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
47638+{
47639+ struct name_entry *nmatch;
47640+ char *p, *lastp = NULL;
47641+ struct acl_object_label *obj = NULL, *tmp;
47642+ struct acl_subject_label *tmpsubj;
47643+ char c = '\0';
47644+
47645+ read_lock(&gr_inode_lock);
47646+
47647+ p = name + len - 1;
47648+ do {
47649+ nmatch = lookup_name_entry(name);
47650+ if (lastp != NULL)
47651+ *lastp = c;
47652+
47653+ if (nmatch == NULL)
47654+ goto next_component;
47655+ tmpsubj = current->acl;
47656+ do {
47657+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
47658+ if (obj != NULL) {
47659+ tmp = obj->globbed;
47660+ while (tmp) {
47661+ if (!glob_match(tmp->filename, name)) {
47662+ obj = tmp;
47663+ goto found_obj;
47664+ }
47665+ tmp = tmp->next;
47666+ }
47667+ goto found_obj;
47668+ }
47669+ } while ((tmpsubj = tmpsubj->parent_subject));
47670+next_component:
47671+ /* end case */
47672+ if (p == name)
47673+ break;
47674+
47675+ while (*p != '/')
47676+ p--;
47677+ if (p == name)
47678+ lastp = p + 1;
47679+ else {
47680+ lastp = p;
47681+ p--;
47682+ }
47683+ c = *lastp;
47684+ *lastp = '\0';
47685+ } while (1);
47686+found_obj:
47687+ read_unlock(&gr_inode_lock);
47688+ /* obj returned will always be non-null */
47689+ return obj;
47690+}
47691+
47692+/* returns 0 when allowing, non-zero on error
47693+ op of 0 is used for readdir, so we don't log the names of hidden files
47694+*/
47695+__u32
47696+gr_handle_sysctl(const struct ctl_table *table, const int op)
47697+{
47698+ struct ctl_table *tmp;
47699+ const char *proc_sys = "/proc/sys";
47700+ char *path;
47701+ struct acl_object_label *obj;
47702+ unsigned short len = 0, pos = 0, depth = 0, i;
47703+ __u32 err = 0;
47704+ __u32 mode = 0;
47705+
47706+ if (unlikely(!(gr_status & GR_READY)))
47707+ return 0;
47708+
47709+ /* for now, ignore operations on non-sysctl entries if it's not a
47710+ readdir*/
47711+ if (table->child != NULL && op != 0)
47712+ return 0;
47713+
47714+ mode |= GR_FIND;
47715+ /* it's only a read if it's an entry, read on dirs is for readdir */
47716+ if (op & MAY_READ)
47717+ mode |= GR_READ;
47718+ if (op & MAY_WRITE)
47719+ mode |= GR_WRITE;
47720+
47721+ preempt_disable();
47722+
47723+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
47724+
47725+ /* it's only a read/write if it's an actual entry, not a dir
47726+ (which are opened for readdir)
47727+ */
47728+
47729+ /* convert the requested sysctl entry into a pathname */
47730+
47731+ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
47732+ len += strlen(tmp->procname);
47733+ len++;
47734+ depth++;
47735+ }
47736+
47737+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
47738+ /* deny */
47739+ goto out;
47740+ }
47741+
47742+ memset(path, 0, PAGE_SIZE);
47743+
47744+ memcpy(path, proc_sys, strlen(proc_sys));
47745+
47746+ pos += strlen(proc_sys);
47747+
47748+ for (; depth > 0; depth--) {
47749+ path[pos] = '/';
47750+ pos++;
47751+ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
47752+ if (depth == i) {
47753+ memcpy(path + pos, tmp->procname,
47754+ strlen(tmp->procname));
47755+ pos += strlen(tmp->procname);
47756+ }
47757+ i++;
47758+ }
47759+ }
47760+
47761+ obj = gr_lookup_by_name(path, pos);
47762+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
47763+
47764+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
47765+ ((err & mode) != mode))) {
47766+ __u32 new_mode = mode;
47767+
47768+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
47769+
47770+ err = 0;
47771+ gr_log_learn_sysctl(path, new_mode);
47772+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
47773+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
47774+ err = -ENOENT;
47775+ } else if (!(err & GR_FIND)) {
47776+ err = -ENOENT;
47777+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
47778+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
47779+ path, (mode & GR_READ) ? " reading" : "",
47780+ (mode & GR_WRITE) ? " writing" : "");
47781+ err = -EACCES;
47782+ } else if ((err & mode) != mode) {
47783+ err = -EACCES;
47784+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
47785+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
47786+ path, (mode & GR_READ) ? " reading" : "",
47787+ (mode & GR_WRITE) ? " writing" : "");
47788+ err = 0;
47789+ } else
47790+ err = 0;
47791+
47792+ out:
47793+ preempt_enable();
47794+
47795+ return err;
47796+}
47797+#endif
47798+
47799+int
47800+gr_handle_proc_ptrace(struct task_struct *task)
47801+{
47802+ struct file *filp;
47803+ struct task_struct *tmp = task;
47804+ struct task_struct *curtemp = current;
47805+ __u32 retmode;
47806+
47807+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
47808+ if (unlikely(!(gr_status & GR_READY)))
47809+ return 0;
47810+#endif
47811+
47812+ read_lock(&tasklist_lock);
47813+ read_lock(&grsec_exec_file_lock);
47814+ filp = task->exec_file;
47815+
47816+ while (tmp->pid > 0) {
47817+ if (tmp == curtemp)
47818+ break;
47819+ tmp = tmp->real_parent;
47820+ }
47821+
47822+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
47823+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
47824+ read_unlock(&grsec_exec_file_lock);
47825+ read_unlock(&tasklist_lock);
47826+ return 1;
47827+ }
47828+
47829+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
47830+ if (!(gr_status & GR_READY)) {
47831+ read_unlock(&grsec_exec_file_lock);
47832+ read_unlock(&tasklist_lock);
47833+ return 0;
47834+ }
47835+#endif
47836+
47837+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
47838+ read_unlock(&grsec_exec_file_lock);
47839+ read_unlock(&tasklist_lock);
47840+
47841+ if (retmode & GR_NOPTRACE)
47842+ return 1;
47843+
47844+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
47845+ && (current->acl != task->acl || (current->acl != current->role->root_label
47846+ && current->pid != task->pid)))
47847+ return 1;
47848+
47849+ return 0;
47850+}
47851+
47852+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
47853+{
47854+ if (unlikely(!(gr_status & GR_READY)))
47855+ return;
47856+
47857+ if (!(current->role->roletype & GR_ROLE_GOD))
47858+ return;
47859+
47860+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
47861+ p->role->rolename, gr_task_roletype_to_char(p),
47862+ p->acl->filename);
47863+}
47864+
47865+int
47866+gr_handle_ptrace(struct task_struct *task, const long request)
47867+{
47868+ struct task_struct *tmp = task;
47869+ struct task_struct *curtemp = current;
47870+ __u32 retmode;
47871+
47872+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
47873+ if (unlikely(!(gr_status & GR_READY)))
47874+ return 0;
47875+#endif
47876+
47877+ read_lock(&tasklist_lock);
47878+ while (tmp->pid > 0) {
47879+ if (tmp == curtemp)
47880+ break;
47881+ tmp = tmp->real_parent;
47882+ }
47883+
47884+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
47885+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
47886+ read_unlock(&tasklist_lock);
47887+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
47888+ return 1;
47889+ }
47890+ read_unlock(&tasklist_lock);
47891+
47892+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
47893+ if (!(gr_status & GR_READY))
47894+ return 0;
47895+#endif
47896+
47897+ read_lock(&grsec_exec_file_lock);
47898+ if (unlikely(!task->exec_file)) {
47899+ read_unlock(&grsec_exec_file_lock);
47900+ return 0;
47901+ }
47902+
47903+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
47904+ read_unlock(&grsec_exec_file_lock);
47905+
47906+ if (retmode & GR_NOPTRACE) {
47907+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
47908+ return 1;
47909+ }
47910+
47911+ if (retmode & GR_PTRACERD) {
47912+ switch (request) {
47913+ case PTRACE_POKETEXT:
47914+ case PTRACE_POKEDATA:
47915+ case PTRACE_POKEUSR:
47916+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
47917+ case PTRACE_SETREGS:
47918+ case PTRACE_SETFPREGS:
47919+#endif
47920+#ifdef CONFIG_X86
47921+ case PTRACE_SETFPXREGS:
47922+#endif
47923+#ifdef CONFIG_ALTIVEC
47924+ case PTRACE_SETVRREGS:
47925+#endif
47926+ return 1;
47927+ default:
47928+ return 0;
47929+ }
47930+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
47931+ !(current->role->roletype & GR_ROLE_GOD) &&
47932+ (current->acl != task->acl)) {
47933+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
47934+ return 1;
47935+ }
47936+
47937+ return 0;
47938+}
47939+
47940+static int is_writable_mmap(const struct file *filp)
47941+{
47942+ struct task_struct *task = current;
47943+ struct acl_object_label *obj, *obj2;
47944+
47945+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
47946+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
47947+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47948+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
47949+ task->role->root_label);
47950+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
47951+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
47952+ return 1;
47953+ }
47954+ }
47955+ return 0;
47956+}
47957+
47958+int
47959+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
47960+{
47961+ __u32 mode;
47962+
47963+ if (unlikely(!file || !(prot & PROT_EXEC)))
47964+ return 1;
47965+
47966+ if (is_writable_mmap(file))
47967+ return 0;
47968+
47969+ mode =
47970+ gr_search_file(file->f_path.dentry,
47971+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
47972+ file->f_path.mnt);
47973+
47974+ if (!gr_tpe_allow(file))
47975+ return 0;
47976+
47977+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
47978+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47979+ return 0;
47980+ } else if (unlikely(!(mode & GR_EXEC))) {
47981+ return 0;
47982+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
47983+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47984+ return 1;
47985+ }
47986+
47987+ return 1;
47988+}
47989+
47990+int
47991+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
47992+{
47993+ __u32 mode;
47994+
47995+ if (unlikely(!file || !(prot & PROT_EXEC)))
47996+ return 1;
47997+
47998+ if (is_writable_mmap(file))
47999+ return 0;
48000+
48001+ mode =
48002+ gr_search_file(file->f_path.dentry,
48003+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
48004+ file->f_path.mnt);
48005+
48006+ if (!gr_tpe_allow(file))
48007+ return 0;
48008+
48009+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
48010+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
48011+ return 0;
48012+ } else if (unlikely(!(mode & GR_EXEC))) {
48013+ return 0;
48014+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
48015+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
48016+ return 1;
48017+ }
48018+
48019+ return 1;
48020+}
48021+
48022+void
48023+gr_acl_handle_psacct(struct task_struct *task, const long code)
48024+{
48025+ unsigned long runtime;
48026+ unsigned long cputime;
48027+ unsigned int wday, cday;
48028+ __u8 whr, chr;
48029+ __u8 wmin, cmin;
48030+ __u8 wsec, csec;
48031+ struct timespec timeval;
48032+
48033+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
48034+ !(task->acl->mode & GR_PROCACCT)))
48035+ return;
48036+
48037+ do_posix_clock_monotonic_gettime(&timeval);
48038+ runtime = timeval.tv_sec - task->start_time.tv_sec;
48039+ wday = runtime / (3600 * 24);
48040+ runtime -= wday * (3600 * 24);
48041+ whr = runtime / 3600;
48042+ runtime -= whr * 3600;
48043+ wmin = runtime / 60;
48044+ runtime -= wmin * 60;
48045+ wsec = runtime;
48046+
48047+ cputime = (task->utime + task->stime) / HZ;
48048+ cday = cputime / (3600 * 24);
48049+ cputime -= cday * (3600 * 24);
48050+ chr = cputime / 3600;
48051+ cputime -= chr * 3600;
48052+ cmin = cputime / 60;
48053+ cputime -= cmin * 60;
48054+ csec = cputime;
48055+
48056+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
48057+
48058+ return;
48059+}
48060+
48061+void gr_set_kernel_label(struct task_struct *task)
48062+{
48063+ if (gr_status & GR_READY) {
48064+ task->role = kernel_role;
48065+ task->acl = kernel_role->root_label;
48066+ }
48067+ return;
48068+}
48069+
48070+#ifdef CONFIG_TASKSTATS
48071+int gr_is_taskstats_denied(int pid)
48072+{
48073+ struct task_struct *task;
48074+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48075+ const struct cred *cred;
48076+#endif
48077+ int ret = 0;
48078+
48079+ /* restrict taskstats viewing to un-chrooted root users
48080+ who have the 'view' subject flag if the RBAC system is enabled
48081+ */
48082+
48083+ rcu_read_lock();
48084+ read_lock(&tasklist_lock);
48085+ task = find_task_by_vpid(pid);
48086+ if (task) {
48087+#ifdef CONFIG_GRKERNSEC_CHROOT
48088+ if (proc_is_chrooted(task))
48089+ ret = -EACCES;
48090+#endif
48091+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48092+ cred = __task_cred(task);
48093+#ifdef CONFIG_GRKERNSEC_PROC_USER
48094+ if (cred->uid != 0)
48095+ ret = -EACCES;
48096+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48097+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
48098+ ret = -EACCES;
48099+#endif
48100+#endif
48101+ if (gr_status & GR_READY) {
48102+ if (!(task->acl->mode & GR_VIEW))
48103+ ret = -EACCES;
48104+ }
48105+ } else
48106+ ret = -ENOENT;
48107+
48108+ read_unlock(&tasklist_lock);
48109+ rcu_read_unlock();
48110+
48111+ return ret;
48112+}
48113+#endif
48114+
48115+/* AUXV entries are filled via a descendant of search_binary_handler
48116+ after we've already applied the subject for the target
48117+*/
48118+int gr_acl_enable_at_secure(void)
48119+{
48120+ if (unlikely(!(gr_status & GR_READY)))
48121+ return 0;
48122+
48123+ if (current->acl->mode & GR_ATSECURE)
48124+ return 1;
48125+
48126+ return 0;
48127+}
48128+
48129+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
48130+{
48131+ struct task_struct *task = current;
48132+ struct dentry *dentry = file->f_path.dentry;
48133+ struct vfsmount *mnt = file->f_path.mnt;
48134+ struct acl_object_label *obj, *tmp;
48135+ struct acl_subject_label *subj;
48136+ unsigned int bufsize;
48137+ int is_not_root;
48138+ char *path;
48139+ dev_t dev = __get_dev(dentry);
48140+
48141+ if (unlikely(!(gr_status & GR_READY)))
48142+ return 1;
48143+
48144+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
48145+ return 1;
48146+
48147+ /* ignore Eric Biederman */
48148+ if (IS_PRIVATE(dentry->d_inode))
48149+ return 1;
48150+
48151+ subj = task->acl;
48152+ do {
48153+ obj = lookup_acl_obj_label(ino, dev, subj);
48154+ if (obj != NULL)
48155+ return (obj->mode & GR_FIND) ? 1 : 0;
48156+ } while ((subj = subj->parent_subject));
48157+
48158+ /* this is purely an optimization since we're looking for an object
48159+ for the directory we're doing a readdir on
48160+ if it's possible for any globbed object to match the entry we're
48161+ filling into the directory, then the object we find here will be
48162+ an anchor point with attached globbed objects
48163+ */
48164+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
48165+ if (obj->globbed == NULL)
48166+ return (obj->mode & GR_FIND) ? 1 : 0;
48167+
48168+ is_not_root = ((obj->filename[0] == '/') &&
48169+ (obj->filename[1] == '\0')) ? 0 : 1;
48170+ bufsize = PAGE_SIZE - namelen - is_not_root;
48171+
48172+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
48173+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
48174+ return 1;
48175+
48176+ preempt_disable();
48177+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
48178+ bufsize);
48179+
48180+ bufsize = strlen(path);
48181+
48182+ /* if base is "/", don't append an additional slash */
48183+ if (is_not_root)
48184+ *(path + bufsize) = '/';
48185+ memcpy(path + bufsize + is_not_root, name, namelen);
48186+ *(path + bufsize + namelen + is_not_root) = '\0';
48187+
48188+ tmp = obj->globbed;
48189+ while (tmp) {
48190+ if (!glob_match(tmp->filename, path)) {
48191+ preempt_enable();
48192+ return (tmp->mode & GR_FIND) ? 1 : 0;
48193+ }
48194+ tmp = tmp->next;
48195+ }
48196+ preempt_enable();
48197+ return (obj->mode & GR_FIND) ? 1 : 0;
48198+}
48199+
48200+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
48201+EXPORT_SYMBOL(gr_acl_is_enabled);
48202+#endif
48203+EXPORT_SYMBOL(gr_learn_resource);
48204+EXPORT_SYMBOL(gr_set_kernel_label);
48205+#ifdef CONFIG_SECURITY
48206+EXPORT_SYMBOL(gr_check_user_change);
48207+EXPORT_SYMBOL(gr_check_group_change);
48208+#endif
48209+
48210diff -urNp linux-3.0.4/grsecurity/gracl_cap.c linux-3.0.4/grsecurity/gracl_cap.c
48211--- linux-3.0.4/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
48212+++ linux-3.0.4/grsecurity/gracl_cap.c 2011-08-23 21:48:14.000000000 -0400
48213@@ -0,0 +1,139 @@
48214+#include <linux/kernel.h>
48215+#include <linux/module.h>
48216+#include <linux/sched.h>
48217+#include <linux/gracl.h>
48218+#include <linux/grsecurity.h>
48219+#include <linux/grinternal.h>
48220+
48221+static const char *captab_log[] = {
48222+ "CAP_CHOWN",
48223+ "CAP_DAC_OVERRIDE",
48224+ "CAP_DAC_READ_SEARCH",
48225+ "CAP_FOWNER",
48226+ "CAP_FSETID",
48227+ "CAP_KILL",
48228+ "CAP_SETGID",
48229+ "CAP_SETUID",
48230+ "CAP_SETPCAP",
48231+ "CAP_LINUX_IMMUTABLE",
48232+ "CAP_NET_BIND_SERVICE",
48233+ "CAP_NET_BROADCAST",
48234+ "CAP_NET_ADMIN",
48235+ "CAP_NET_RAW",
48236+ "CAP_IPC_LOCK",
48237+ "CAP_IPC_OWNER",
48238+ "CAP_SYS_MODULE",
48239+ "CAP_SYS_RAWIO",
48240+ "CAP_SYS_CHROOT",
48241+ "CAP_SYS_PTRACE",
48242+ "CAP_SYS_PACCT",
48243+ "CAP_SYS_ADMIN",
48244+ "CAP_SYS_BOOT",
48245+ "CAP_SYS_NICE",
48246+ "CAP_SYS_RESOURCE",
48247+ "CAP_SYS_TIME",
48248+ "CAP_SYS_TTY_CONFIG",
48249+ "CAP_MKNOD",
48250+ "CAP_LEASE",
48251+ "CAP_AUDIT_WRITE",
48252+ "CAP_AUDIT_CONTROL",
48253+ "CAP_SETFCAP",
48254+ "CAP_MAC_OVERRIDE",
48255+ "CAP_MAC_ADMIN",
48256+ "CAP_SYSLOG"
48257+};
48258+
48259+EXPORT_SYMBOL(gr_is_capable);
48260+EXPORT_SYMBOL(gr_is_capable_nolog);
48261+
48262+int
48263+gr_is_capable(const int cap)
48264+{
48265+ struct task_struct *task = current;
48266+ const struct cred *cred = current_cred();
48267+ struct acl_subject_label *curracl;
48268+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
48269+ kernel_cap_t cap_audit = __cap_empty_set;
48270+
48271+ if (!gr_acl_is_enabled())
48272+ return 1;
48273+
48274+ curracl = task->acl;
48275+
48276+ cap_drop = curracl->cap_lower;
48277+ cap_mask = curracl->cap_mask;
48278+ cap_audit = curracl->cap_invert_audit;
48279+
48280+ while ((curracl = curracl->parent_subject)) {
48281+ /* if the cap isn't specified in the current computed mask but is specified in the
48282+ current level subject, and is lowered in the current level subject, then add
48283+ it to the set of dropped capabilities
48284+ otherwise, add the current level subject's mask to the current computed mask
48285+ */
48286+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
48287+ cap_raise(cap_mask, cap);
48288+ if (cap_raised(curracl->cap_lower, cap))
48289+ cap_raise(cap_drop, cap);
48290+ if (cap_raised(curracl->cap_invert_audit, cap))
48291+ cap_raise(cap_audit, cap);
48292+ }
48293+ }
48294+
48295+ if (!cap_raised(cap_drop, cap)) {
48296+ if (cap_raised(cap_audit, cap))
48297+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
48298+ return 1;
48299+ }
48300+
48301+ curracl = task->acl;
48302+
48303+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
48304+ && cap_raised(cred->cap_effective, cap)) {
48305+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
48306+ task->role->roletype, cred->uid,
48307+ cred->gid, task->exec_file ?
48308+ gr_to_filename(task->exec_file->f_path.dentry,
48309+ task->exec_file->f_path.mnt) : curracl->filename,
48310+ curracl->filename, 0UL,
48311+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
48312+ return 1;
48313+ }
48314+
48315+ if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
48316+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
48317+ return 0;
48318+}
48319+
48320+int
48321+gr_is_capable_nolog(const int cap)
48322+{
48323+ struct acl_subject_label *curracl;
48324+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
48325+
48326+ if (!gr_acl_is_enabled())
48327+ return 1;
48328+
48329+ curracl = current->acl;
48330+
48331+ cap_drop = curracl->cap_lower;
48332+ cap_mask = curracl->cap_mask;
48333+
48334+ while ((curracl = curracl->parent_subject)) {
48335+ /* if the cap isn't specified in the current computed mask but is specified in the
48336+ current level subject, and is lowered in the current level subject, then add
48337+ it to the set of dropped capabilities
48338+ otherwise, add the current level subject's mask to the current computed mask
48339+ */
48340+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
48341+ cap_raise(cap_mask, cap);
48342+ if (cap_raised(curracl->cap_lower, cap))
48343+ cap_raise(cap_drop, cap);
48344+ }
48345+ }
48346+
48347+ if (!cap_raised(cap_drop, cap))
48348+ return 1;
48349+
48350+ return 0;
48351+}
48352+
48353diff -urNp linux-3.0.4/grsecurity/gracl_fs.c linux-3.0.4/grsecurity/gracl_fs.c
48354--- linux-3.0.4/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
48355+++ linux-3.0.4/grsecurity/gracl_fs.c 2011-08-23 21:48:14.000000000 -0400
48356@@ -0,0 +1,431 @@
48357+#include <linux/kernel.h>
48358+#include <linux/sched.h>
48359+#include <linux/types.h>
48360+#include <linux/fs.h>
48361+#include <linux/file.h>
48362+#include <linux/stat.h>
48363+#include <linux/grsecurity.h>
48364+#include <linux/grinternal.h>
48365+#include <linux/gracl.h>
48366+
48367+__u32
48368+gr_acl_handle_hidden_file(const struct dentry * dentry,
48369+ const struct vfsmount * mnt)
48370+{
48371+ __u32 mode;
48372+
48373+ if (unlikely(!dentry->d_inode))
48374+ return GR_FIND;
48375+
48376+ mode =
48377+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
48378+
48379+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
48380+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
48381+ return mode;
48382+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
48383+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
48384+ return 0;
48385+ } else if (unlikely(!(mode & GR_FIND)))
48386+ return 0;
48387+
48388+ return GR_FIND;
48389+}
48390+
48391+__u32
48392+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
48393+ const int fmode)
48394+{
48395+ __u32 reqmode = GR_FIND;
48396+ __u32 mode;
48397+
48398+ if (unlikely(!dentry->d_inode))
48399+ return reqmode;
48400+
48401+ if (unlikely(fmode & O_APPEND))
48402+ reqmode |= GR_APPEND;
48403+ else if (unlikely(fmode & FMODE_WRITE))
48404+ reqmode |= GR_WRITE;
48405+ if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
48406+ reqmode |= GR_READ;
48407+ if ((fmode & FMODE_GREXEC) && (fmode & __FMODE_EXEC))
48408+ reqmode &= ~GR_READ;
48409+ mode =
48410+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
48411+ mnt);
48412+
48413+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48414+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
48415+ reqmode & GR_READ ? " reading" : "",
48416+ reqmode & GR_WRITE ? " writing" : reqmode &
48417+ GR_APPEND ? " appending" : "");
48418+ return reqmode;
48419+ } else
48420+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48421+ {
48422+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
48423+ reqmode & GR_READ ? " reading" : "",
48424+ reqmode & GR_WRITE ? " writing" : reqmode &
48425+ GR_APPEND ? " appending" : "");
48426+ return 0;
48427+ } else if (unlikely((mode & reqmode) != reqmode))
48428+ return 0;
48429+
48430+ return reqmode;
48431+}
48432+
48433+__u32
48434+gr_acl_handle_creat(const struct dentry * dentry,
48435+ const struct dentry * p_dentry,
48436+ const struct vfsmount * p_mnt, const int fmode,
48437+ const int imode)
48438+{
48439+ __u32 reqmode = GR_WRITE | GR_CREATE;
48440+ __u32 mode;
48441+
48442+ if (unlikely(fmode & O_APPEND))
48443+ reqmode |= GR_APPEND;
48444+ if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
48445+ reqmode |= GR_READ;
48446+ if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
48447+ reqmode |= GR_SETID;
48448+
48449+ mode =
48450+ gr_check_create(dentry, p_dentry, p_mnt,
48451+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
48452+
48453+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48454+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
48455+ reqmode & GR_READ ? " reading" : "",
48456+ reqmode & GR_WRITE ? " writing" : reqmode &
48457+ GR_APPEND ? " appending" : "");
48458+ return reqmode;
48459+ } else
48460+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48461+ {
48462+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
48463+ reqmode & GR_READ ? " reading" : "",
48464+ reqmode & GR_WRITE ? " writing" : reqmode &
48465+ GR_APPEND ? " appending" : "");
48466+ return 0;
48467+ } else if (unlikely((mode & reqmode) != reqmode))
48468+ return 0;
48469+
48470+ return reqmode;
48471+}
48472+
48473+__u32
48474+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
48475+ const int fmode)
48476+{
48477+ __u32 mode, reqmode = GR_FIND;
48478+
48479+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
48480+ reqmode |= GR_EXEC;
48481+ if (fmode & S_IWOTH)
48482+ reqmode |= GR_WRITE;
48483+ if (fmode & S_IROTH)
48484+ reqmode |= GR_READ;
48485+
48486+ mode =
48487+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
48488+ mnt);
48489+
48490+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48491+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
48492+ reqmode & GR_READ ? " reading" : "",
48493+ reqmode & GR_WRITE ? " writing" : "",
48494+ reqmode & GR_EXEC ? " executing" : "");
48495+ return reqmode;
48496+ } else
48497+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48498+ {
48499+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
48500+ reqmode & GR_READ ? " reading" : "",
48501+ reqmode & GR_WRITE ? " writing" : "",
48502+ reqmode & GR_EXEC ? " executing" : "");
48503+ return 0;
48504+ } else if (unlikely((mode & reqmode) != reqmode))
48505+ return 0;
48506+
48507+ return reqmode;
48508+}
48509+
48510+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
48511+{
48512+ __u32 mode;
48513+
48514+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
48515+
48516+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
48517+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
48518+ return mode;
48519+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
48520+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
48521+ return 0;
48522+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
48523+ return 0;
48524+
48525+ return (reqmode);
48526+}
48527+
48528+__u32
48529+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
48530+{
48531+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
48532+}
48533+
48534+__u32
48535+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
48536+{
48537+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
48538+}
48539+
48540+__u32
48541+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
48542+{
48543+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
48544+}
48545+
48546+__u32
48547+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
48548+{
48549+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
48550+}
48551+
48552+__u32
48553+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
48554+ mode_t mode)
48555+{
48556+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
48557+ return 1;
48558+
48559+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
48560+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
48561+ GR_FCHMOD_ACL_MSG);
48562+ } else {
48563+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
48564+ }
48565+}
48566+
48567+__u32
48568+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
48569+ mode_t mode)
48570+{
48571+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
48572+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
48573+ GR_CHMOD_ACL_MSG);
48574+ } else {
48575+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
48576+ }
48577+}
48578+
48579+__u32
48580+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
48581+{
48582+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
48583+}
48584+
48585+__u32
48586+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
48587+{
48588+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
48589+}
48590+
48591+__u32
48592+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
48593+{
48594+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
48595+}
48596+
48597+__u32
48598+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
48599+{
48600+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
48601+ GR_UNIXCONNECT_ACL_MSG);
48602+}
48603+
48604+/* hardlinks require at minimum create permission,
48605+ any additional privilege required is based on the
48606+ privilege of the file being linked to
48607+*/
48608+__u32
48609+gr_acl_handle_link(const struct dentry * new_dentry,
48610+ const struct dentry * parent_dentry,
48611+ const struct vfsmount * parent_mnt,
48612+ const struct dentry * old_dentry,
48613+ const struct vfsmount * old_mnt, const char *to)
48614+{
48615+ __u32 mode;
48616+ __u32 needmode = GR_CREATE | GR_LINK;
48617+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
48618+
48619+ mode =
48620+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
48621+ old_mnt);
48622+
48623+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
48624+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
48625+ return mode;
48626+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
48627+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
48628+ return 0;
48629+ } else if (unlikely((mode & needmode) != needmode))
48630+ return 0;
48631+
48632+ return 1;
48633+}
48634+
48635+__u32
48636+gr_acl_handle_symlink(const struct dentry * new_dentry,
48637+ const struct dentry * parent_dentry,
48638+ const struct vfsmount * parent_mnt, const char *from)
48639+{
48640+ __u32 needmode = GR_WRITE | GR_CREATE;
48641+ __u32 mode;
48642+
48643+ mode =
48644+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
48645+ GR_CREATE | GR_AUDIT_CREATE |
48646+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
48647+
48648+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
48649+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
48650+ return mode;
48651+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
48652+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
48653+ return 0;
48654+ } else if (unlikely((mode & needmode) != needmode))
48655+ return 0;
48656+
48657+ return (GR_WRITE | GR_CREATE);
48658+}
48659+
48660+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
48661+{
48662+ __u32 mode;
48663+
48664+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
48665+
48666+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
48667+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
48668+ return mode;
48669+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
48670+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
48671+ return 0;
48672+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
48673+ return 0;
48674+
48675+ return (reqmode);
48676+}
48677+
48678+__u32
48679+gr_acl_handle_mknod(const struct dentry * new_dentry,
48680+ const struct dentry * parent_dentry,
48681+ const struct vfsmount * parent_mnt,
48682+ const int mode)
48683+{
48684+ __u32 reqmode = GR_WRITE | GR_CREATE;
48685+ if (unlikely(mode & (S_ISUID | S_ISGID)))
48686+ reqmode |= GR_SETID;
48687+
48688+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
48689+ reqmode, GR_MKNOD_ACL_MSG);
48690+}
48691+
48692+__u32
48693+gr_acl_handle_mkdir(const struct dentry *new_dentry,
48694+ const struct dentry *parent_dentry,
48695+ const struct vfsmount *parent_mnt)
48696+{
48697+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
48698+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
48699+}
48700+
48701+#define RENAME_CHECK_SUCCESS(old, new) \
48702+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
48703+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
48704+
48705+int
48706+gr_acl_handle_rename(struct dentry *new_dentry,
48707+ struct dentry *parent_dentry,
48708+ const struct vfsmount *parent_mnt,
48709+ struct dentry *old_dentry,
48710+ struct inode *old_parent_inode,
48711+ struct vfsmount *old_mnt, const char *newname)
48712+{
48713+ __u32 comp1, comp2;
48714+ int error = 0;
48715+
48716+ if (unlikely(!gr_acl_is_enabled()))
48717+ return 0;
48718+
48719+ if (!new_dentry->d_inode) {
48720+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
48721+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
48722+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
48723+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
48724+ GR_DELETE | GR_AUDIT_DELETE |
48725+ GR_AUDIT_READ | GR_AUDIT_WRITE |
48726+ GR_SUPPRESS, old_mnt);
48727+ } else {
48728+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
48729+ GR_CREATE | GR_DELETE |
48730+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
48731+ GR_AUDIT_READ | GR_AUDIT_WRITE |
48732+ GR_SUPPRESS, parent_mnt);
48733+ comp2 =
48734+ gr_search_file(old_dentry,
48735+ GR_READ | GR_WRITE | GR_AUDIT_READ |
48736+ GR_DELETE | GR_AUDIT_DELETE |
48737+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
48738+ }
48739+
48740+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
48741+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
48742+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
48743+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
48744+ && !(comp2 & GR_SUPPRESS)) {
48745+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
48746+ error = -EACCES;
48747+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
48748+ error = -EACCES;
48749+
48750+ return error;
48751+}
48752+
48753+void
48754+gr_acl_handle_exit(void)
48755+{
48756+ u16 id;
48757+ char *rolename;
48758+ struct file *exec_file;
48759+
48760+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
48761+ !(current->role->roletype & GR_ROLE_PERSIST))) {
48762+ id = current->acl_role_id;
48763+ rolename = current->role->rolename;
48764+ gr_set_acls(1);
48765+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
48766+ }
48767+
48768+ write_lock(&grsec_exec_file_lock);
48769+ exec_file = current->exec_file;
48770+ current->exec_file = NULL;
48771+ write_unlock(&grsec_exec_file_lock);
48772+
48773+ if (exec_file)
48774+ fput(exec_file);
48775+}
48776+
48777+int
48778+gr_acl_handle_procpidmem(const struct task_struct *task)
48779+{
48780+ if (unlikely(!gr_acl_is_enabled()))
48781+ return 0;
48782+
48783+ if (task != current && task->acl->mode & GR_PROTPROCFD)
48784+ return -EACCES;
48785+
48786+ return 0;
48787+}
48788diff -urNp linux-3.0.4/grsecurity/gracl_ip.c linux-3.0.4/grsecurity/gracl_ip.c
48789--- linux-3.0.4/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
48790+++ linux-3.0.4/grsecurity/gracl_ip.c 2011-08-23 21:48:14.000000000 -0400
48791@@ -0,0 +1,381 @@
48792+#include <linux/kernel.h>
48793+#include <asm/uaccess.h>
48794+#include <asm/errno.h>
48795+#include <net/sock.h>
48796+#include <linux/file.h>
48797+#include <linux/fs.h>
48798+#include <linux/net.h>
48799+#include <linux/in.h>
48800+#include <linux/skbuff.h>
48801+#include <linux/ip.h>
48802+#include <linux/udp.h>
48803+#include <linux/types.h>
48804+#include <linux/sched.h>
48805+#include <linux/netdevice.h>
48806+#include <linux/inetdevice.h>
48807+#include <linux/gracl.h>
48808+#include <linux/grsecurity.h>
48809+#include <linux/grinternal.h>
48810+
48811+#define GR_BIND 0x01
48812+#define GR_CONNECT 0x02
48813+#define GR_INVERT 0x04
48814+#define GR_BINDOVERRIDE 0x08
48815+#define GR_CONNECTOVERRIDE 0x10
48816+#define GR_SOCK_FAMILY 0x20
48817+
48818+static const char * gr_protocols[IPPROTO_MAX] = {
48819+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
48820+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
48821+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
48822+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
48823+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
48824+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
48825+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
48826+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
48827+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
48828+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
48829+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
48830+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
48831+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
48832+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
48833+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
48834+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
48835+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
48836+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
48837+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
48838+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
48839+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
48840+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
48841+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
48842+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
48843+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
48844+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
48845+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
48846+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
48847+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
48848+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
48849+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
48850+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
48851+ };
48852+
48853+static const char * gr_socktypes[SOCK_MAX] = {
48854+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
48855+ "unknown:7", "unknown:8", "unknown:9", "packet"
48856+ };
48857+
48858+static const char * gr_sockfamilies[AF_MAX+1] = {
48859+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
48860+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
48861+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
48862+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
48863+ };
48864+
48865+const char *
48866+gr_proto_to_name(unsigned char proto)
48867+{
48868+ return gr_protocols[proto];
48869+}
48870+
48871+const char *
48872+gr_socktype_to_name(unsigned char type)
48873+{
48874+ return gr_socktypes[type];
48875+}
48876+
48877+const char *
48878+gr_sockfamily_to_name(unsigned char family)
48879+{
48880+ return gr_sockfamilies[family];
48881+}
48882+
48883+int
48884+gr_search_socket(const int domain, const int type, const int protocol)
48885+{
48886+ struct acl_subject_label *curr;
48887+ const struct cred *cred = current_cred();
48888+
48889+ if (unlikely(!gr_acl_is_enabled()))
48890+ goto exit;
48891+
48892+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
48893+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
48894+ goto exit; // let the kernel handle it
48895+
48896+ curr = current->acl;
48897+
48898+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
48899+ /* the family is allowed, if this is PF_INET allow it only if
48900+ the extra sock type/protocol checks pass */
48901+ if (domain == PF_INET)
48902+ goto inet_check;
48903+ goto exit;
48904+ } else {
48905+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
48906+ __u32 fakeip = 0;
48907+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48908+ current->role->roletype, cred->uid,
48909+ cred->gid, current->exec_file ?
48910+ gr_to_filename(current->exec_file->f_path.dentry,
48911+ current->exec_file->f_path.mnt) :
48912+ curr->filename, curr->filename,
48913+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
48914+ &current->signal->saved_ip);
48915+ goto exit;
48916+ }
48917+ goto exit_fail;
48918+ }
48919+
48920+inet_check:
48921+ /* the rest of this checking is for IPv4 only */
48922+ if (!curr->ips)
48923+ goto exit;
48924+
48925+ if ((curr->ip_type & (1 << type)) &&
48926+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
48927+ goto exit;
48928+
48929+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
48930+ /* we don't place acls on raw sockets , and sometimes
48931+ dgram/ip sockets are opened for ioctl and not
48932+ bind/connect, so we'll fake a bind learn log */
48933+ if (type == SOCK_RAW || type == SOCK_PACKET) {
48934+ __u32 fakeip = 0;
48935+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48936+ current->role->roletype, cred->uid,
48937+ cred->gid, current->exec_file ?
48938+ gr_to_filename(current->exec_file->f_path.dentry,
48939+ current->exec_file->f_path.mnt) :
48940+ curr->filename, curr->filename,
48941+ &fakeip, 0, type,
48942+ protocol, GR_CONNECT, &current->signal->saved_ip);
48943+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
48944+ __u32 fakeip = 0;
48945+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48946+ current->role->roletype, cred->uid,
48947+ cred->gid, current->exec_file ?
48948+ gr_to_filename(current->exec_file->f_path.dentry,
48949+ current->exec_file->f_path.mnt) :
48950+ curr->filename, curr->filename,
48951+ &fakeip, 0, type,
48952+ protocol, GR_BIND, &current->signal->saved_ip);
48953+ }
48954+ /* we'll log when they use connect or bind */
48955+ goto exit;
48956+ }
48957+
48958+exit_fail:
48959+ if (domain == PF_INET)
48960+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
48961+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
48962+ else
48963+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
48964+ gr_socktype_to_name(type), protocol);
48965+
48966+ return 0;
48967+exit:
48968+ return 1;
48969+}
48970+
48971+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
48972+{
48973+ if ((ip->mode & mode) &&
48974+ (ip_port >= ip->low) &&
48975+ (ip_port <= ip->high) &&
48976+ ((ntohl(ip_addr) & our_netmask) ==
48977+ (ntohl(our_addr) & our_netmask))
48978+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
48979+ && (ip->type & (1 << type))) {
48980+ if (ip->mode & GR_INVERT)
48981+ return 2; // specifically denied
48982+ else
48983+ return 1; // allowed
48984+ }
48985+
48986+ return 0; // not specifically allowed, may continue parsing
48987+}
48988+
48989+static int
48990+gr_search_connectbind(const int full_mode, struct sock *sk,
48991+ struct sockaddr_in *addr, const int type)
48992+{
48993+ char iface[IFNAMSIZ] = {0};
48994+ struct acl_subject_label *curr;
48995+ struct acl_ip_label *ip;
48996+ struct inet_sock *isk;
48997+ struct net_device *dev;
48998+ struct in_device *idev;
48999+ unsigned long i;
49000+ int ret;
49001+ int mode = full_mode & (GR_BIND | GR_CONNECT);
49002+ __u32 ip_addr = 0;
49003+ __u32 our_addr;
49004+ __u32 our_netmask;
49005+ char *p;
49006+ __u16 ip_port = 0;
49007+ const struct cred *cred = current_cred();
49008+
49009+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
49010+ return 0;
49011+
49012+ curr = current->acl;
49013+ isk = inet_sk(sk);
49014+
49015+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
49016+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
49017+ addr->sin_addr.s_addr = curr->inaddr_any_override;
49018+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
49019+ struct sockaddr_in saddr;
49020+ int err;
49021+
49022+ saddr.sin_family = AF_INET;
49023+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
49024+ saddr.sin_port = isk->inet_sport;
49025+
49026+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
49027+ if (err)
49028+ return err;
49029+
49030+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
49031+ if (err)
49032+ return err;
49033+ }
49034+
49035+ if (!curr->ips)
49036+ return 0;
49037+
49038+ ip_addr = addr->sin_addr.s_addr;
49039+ ip_port = ntohs(addr->sin_port);
49040+
49041+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
49042+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
49043+ current->role->roletype, cred->uid,
49044+ cred->gid, current->exec_file ?
49045+ gr_to_filename(current->exec_file->f_path.dentry,
49046+ current->exec_file->f_path.mnt) :
49047+ curr->filename, curr->filename,
49048+ &ip_addr, ip_port, type,
49049+ sk->sk_protocol, mode, &current->signal->saved_ip);
49050+ return 0;
49051+ }
49052+
49053+ for (i = 0; i < curr->ip_num; i++) {
49054+ ip = *(curr->ips + i);
49055+ if (ip->iface != NULL) {
49056+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
49057+ p = strchr(iface, ':');
49058+ if (p != NULL)
49059+ *p = '\0';
49060+ dev = dev_get_by_name(sock_net(sk), iface);
49061+ if (dev == NULL)
49062+ continue;
49063+ idev = in_dev_get(dev);
49064+ if (idev == NULL) {
49065+ dev_put(dev);
49066+ continue;
49067+ }
49068+ rcu_read_lock();
49069+ for_ifa(idev) {
49070+ if (!strcmp(ip->iface, ifa->ifa_label)) {
49071+ our_addr = ifa->ifa_address;
49072+ our_netmask = 0xffffffff;
49073+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
49074+ if (ret == 1) {
49075+ rcu_read_unlock();
49076+ in_dev_put(idev);
49077+ dev_put(dev);
49078+ return 0;
49079+ } else if (ret == 2) {
49080+ rcu_read_unlock();
49081+ in_dev_put(idev);
49082+ dev_put(dev);
49083+ goto denied;
49084+ }
49085+ }
49086+ } endfor_ifa(idev);
49087+ rcu_read_unlock();
49088+ in_dev_put(idev);
49089+ dev_put(dev);
49090+ } else {
49091+ our_addr = ip->addr;
49092+ our_netmask = ip->netmask;
49093+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
49094+ if (ret == 1)
49095+ return 0;
49096+ else if (ret == 2)
49097+ goto denied;
49098+ }
49099+ }
49100+
49101+denied:
49102+ if (mode == GR_BIND)
49103+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
49104+ else if (mode == GR_CONNECT)
49105+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
49106+
49107+ return -EACCES;
49108+}
49109+
49110+int
49111+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
49112+{
49113+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
49114+}
49115+
49116+int
49117+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
49118+{
49119+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
49120+}
49121+
49122+int gr_search_listen(struct socket *sock)
49123+{
49124+ struct sock *sk = sock->sk;
49125+ struct sockaddr_in addr;
49126+
49127+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
49128+ addr.sin_port = inet_sk(sk)->inet_sport;
49129+
49130+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
49131+}
49132+
49133+int gr_search_accept(struct socket *sock)
49134+{
49135+ struct sock *sk = sock->sk;
49136+ struct sockaddr_in addr;
49137+
49138+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
49139+ addr.sin_port = inet_sk(sk)->inet_sport;
49140+
49141+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
49142+}
49143+
49144+int
49145+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
49146+{
49147+ if (addr)
49148+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
49149+ else {
49150+ struct sockaddr_in sin;
49151+ const struct inet_sock *inet = inet_sk(sk);
49152+
49153+ sin.sin_addr.s_addr = inet->inet_daddr;
49154+ sin.sin_port = inet->inet_dport;
49155+
49156+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
49157+ }
49158+}
49159+
49160+int
49161+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
49162+{
49163+ struct sockaddr_in sin;
49164+
49165+ if (unlikely(skb->len < sizeof (struct udphdr)))
49166+ return 0; // skip this packet
49167+
49168+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
49169+ sin.sin_port = udp_hdr(skb)->source;
49170+
49171+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
49172+}
49173diff -urNp linux-3.0.4/grsecurity/gracl_learn.c linux-3.0.4/grsecurity/gracl_learn.c
49174--- linux-3.0.4/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
49175+++ linux-3.0.4/grsecurity/gracl_learn.c 2011-08-23 21:48:14.000000000 -0400
49176@@ -0,0 +1,207 @@
49177+#include <linux/kernel.h>
49178+#include <linux/mm.h>
49179+#include <linux/sched.h>
49180+#include <linux/poll.h>
49181+#include <linux/string.h>
49182+#include <linux/file.h>
49183+#include <linux/types.h>
49184+#include <linux/vmalloc.h>
49185+#include <linux/grinternal.h>
49186+
49187+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
49188+ size_t count, loff_t *ppos);
49189+extern int gr_acl_is_enabled(void);
49190+
49191+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
49192+static int gr_learn_attached;
49193+
49194+/* use a 512k buffer */
49195+#define LEARN_BUFFER_SIZE (512 * 1024)
49196+
49197+static DEFINE_SPINLOCK(gr_learn_lock);
49198+static DEFINE_MUTEX(gr_learn_user_mutex);
49199+
49200+/* we need to maintain two buffers, so that the kernel context of grlearn
49201+ uses a semaphore around the userspace copying, and the other kernel contexts
49202+ use a spinlock when copying into the buffer, since they cannot sleep
49203+*/
49204+static char *learn_buffer;
49205+static char *learn_buffer_user;
49206+static int learn_buffer_len;
49207+static int learn_buffer_user_len;
49208+
49209+static ssize_t
49210+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
49211+{
49212+ DECLARE_WAITQUEUE(wait, current);
49213+ ssize_t retval = 0;
49214+
49215+ add_wait_queue(&learn_wait, &wait);
49216+ set_current_state(TASK_INTERRUPTIBLE);
49217+ do {
49218+ mutex_lock(&gr_learn_user_mutex);
49219+ spin_lock(&gr_learn_lock);
49220+ if (learn_buffer_len)
49221+ break;
49222+ spin_unlock(&gr_learn_lock);
49223+ mutex_unlock(&gr_learn_user_mutex);
49224+ if (file->f_flags & O_NONBLOCK) {
49225+ retval = -EAGAIN;
49226+ goto out;
49227+ }
49228+ if (signal_pending(current)) {
49229+ retval = -ERESTARTSYS;
49230+ goto out;
49231+ }
49232+
49233+ schedule();
49234+ } while (1);
49235+
49236+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
49237+ learn_buffer_user_len = learn_buffer_len;
49238+ retval = learn_buffer_len;
49239+ learn_buffer_len = 0;
49240+
49241+ spin_unlock(&gr_learn_lock);
49242+
49243+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
49244+ retval = -EFAULT;
49245+
49246+ mutex_unlock(&gr_learn_user_mutex);
49247+out:
49248+ set_current_state(TASK_RUNNING);
49249+ remove_wait_queue(&learn_wait, &wait);
49250+ return retval;
49251+}
49252+
49253+static unsigned int
49254+poll_learn(struct file * file, poll_table * wait)
49255+{
49256+ poll_wait(file, &learn_wait, wait);
49257+
49258+ if (learn_buffer_len)
49259+ return (POLLIN | POLLRDNORM);
49260+
49261+ return 0;
49262+}
49263+
49264+void
49265+gr_clear_learn_entries(void)
49266+{
49267+ char *tmp;
49268+
49269+ mutex_lock(&gr_learn_user_mutex);
49270+ spin_lock(&gr_learn_lock);
49271+ tmp = learn_buffer;
49272+ learn_buffer = NULL;
49273+ spin_unlock(&gr_learn_lock);
49274+ if (tmp)
49275+ vfree(tmp);
49276+ if (learn_buffer_user != NULL) {
49277+ vfree(learn_buffer_user);
49278+ learn_buffer_user = NULL;
49279+ }
49280+ learn_buffer_len = 0;
49281+ mutex_unlock(&gr_learn_user_mutex);
49282+
49283+ return;
49284+}
49285+
49286+void
49287+gr_add_learn_entry(const char *fmt, ...)
49288+{
49289+ va_list args;
49290+ unsigned int len;
49291+
49292+ if (!gr_learn_attached)
49293+ return;
49294+
49295+ spin_lock(&gr_learn_lock);
49296+
49297+ /* leave a gap at the end so we know when it's "full" but don't have to
49298+ compute the exact length of the string we're trying to append
49299+ */
49300+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
49301+ spin_unlock(&gr_learn_lock);
49302+ wake_up_interruptible(&learn_wait);
49303+ return;
49304+ }
49305+ if (learn_buffer == NULL) {
49306+ spin_unlock(&gr_learn_lock);
49307+ return;
49308+ }
49309+
49310+ va_start(args, fmt);
49311+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
49312+ va_end(args);
49313+
49314+ learn_buffer_len += len + 1;
49315+
49316+ spin_unlock(&gr_learn_lock);
49317+ wake_up_interruptible(&learn_wait);
49318+
49319+ return;
49320+}
49321+
49322+static int
49323+open_learn(struct inode *inode, struct file *file)
49324+{
49325+ if (file->f_mode & FMODE_READ && gr_learn_attached)
49326+ return -EBUSY;
49327+ if (file->f_mode & FMODE_READ) {
49328+ int retval = 0;
49329+ mutex_lock(&gr_learn_user_mutex);
49330+ if (learn_buffer == NULL)
49331+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
49332+ if (learn_buffer_user == NULL)
49333+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
49334+ if (learn_buffer == NULL) {
49335+ retval = -ENOMEM;
49336+ goto out_error;
49337+ }
49338+ if (learn_buffer_user == NULL) {
49339+ retval = -ENOMEM;
49340+ goto out_error;
49341+ }
49342+ learn_buffer_len = 0;
49343+ learn_buffer_user_len = 0;
49344+ gr_learn_attached = 1;
49345+out_error:
49346+ mutex_unlock(&gr_learn_user_mutex);
49347+ return retval;
49348+ }
49349+ return 0;
49350+}
49351+
49352+static int
49353+close_learn(struct inode *inode, struct file *file)
49354+{
49355+ if (file->f_mode & FMODE_READ) {
49356+ char *tmp = NULL;
49357+ mutex_lock(&gr_learn_user_mutex);
49358+ spin_lock(&gr_learn_lock);
49359+ tmp = learn_buffer;
49360+ learn_buffer = NULL;
49361+ spin_unlock(&gr_learn_lock);
49362+ if (tmp)
49363+ vfree(tmp);
49364+ if (learn_buffer_user != NULL) {
49365+ vfree(learn_buffer_user);
49366+ learn_buffer_user = NULL;
49367+ }
49368+ learn_buffer_len = 0;
49369+ learn_buffer_user_len = 0;
49370+ gr_learn_attached = 0;
49371+ mutex_unlock(&gr_learn_user_mutex);
49372+ }
49373+
49374+ return 0;
49375+}
49376+
49377+const struct file_operations grsec_fops = {
49378+ .read = read_learn,
49379+ .write = write_grsec_handler,
49380+ .open = open_learn,
49381+ .release = close_learn,
49382+ .poll = poll_learn,
49383+};
49384diff -urNp linux-3.0.4/grsecurity/gracl_res.c linux-3.0.4/grsecurity/gracl_res.c
49385--- linux-3.0.4/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
49386+++ linux-3.0.4/grsecurity/gracl_res.c 2011-08-23 21:48:14.000000000 -0400
49387@@ -0,0 +1,68 @@
49388+#include <linux/kernel.h>
49389+#include <linux/sched.h>
49390+#include <linux/gracl.h>
49391+#include <linux/grinternal.h>
49392+
49393+static const char *restab_log[] = {
49394+ [RLIMIT_CPU] = "RLIMIT_CPU",
49395+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
49396+ [RLIMIT_DATA] = "RLIMIT_DATA",
49397+ [RLIMIT_STACK] = "RLIMIT_STACK",
49398+ [RLIMIT_CORE] = "RLIMIT_CORE",
49399+ [RLIMIT_RSS] = "RLIMIT_RSS",
49400+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
49401+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
49402+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
49403+ [RLIMIT_AS] = "RLIMIT_AS",
49404+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
49405+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
49406+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
49407+ [RLIMIT_NICE] = "RLIMIT_NICE",
49408+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
49409+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
49410+ [GR_CRASH_RES] = "RLIMIT_CRASH"
49411+};
49412+
49413+void
49414+gr_log_resource(const struct task_struct *task,
49415+ const int res, const unsigned long wanted, const int gt)
49416+{
49417+ const struct cred *cred;
49418+ unsigned long rlim;
49419+
49420+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
49421+ return;
49422+
49423+ // not yet supported resource
49424+ if (unlikely(!restab_log[res]))
49425+ return;
49426+
49427+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
49428+ rlim = task_rlimit_max(task, res);
49429+ else
49430+ rlim = task_rlimit(task, res);
49431+
49432+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
49433+ return;
49434+
49435+ rcu_read_lock();
49436+ cred = __task_cred(task);
49437+
49438+ if (res == RLIMIT_NPROC &&
49439+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
49440+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
49441+ goto out_rcu_unlock;
49442+ else if (res == RLIMIT_MEMLOCK &&
49443+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
49444+ goto out_rcu_unlock;
49445+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
49446+ goto out_rcu_unlock;
49447+ rcu_read_unlock();
49448+
49449+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
49450+
49451+ return;
49452+out_rcu_unlock:
49453+ rcu_read_unlock();
49454+ return;
49455+}
49456diff -urNp linux-3.0.4/grsecurity/gracl_segv.c linux-3.0.4/grsecurity/gracl_segv.c
49457--- linux-3.0.4/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
49458+++ linux-3.0.4/grsecurity/gracl_segv.c 2011-08-23 21:48:14.000000000 -0400
49459@@ -0,0 +1,299 @@
49460+#include <linux/kernel.h>
49461+#include <linux/mm.h>
49462+#include <asm/uaccess.h>
49463+#include <asm/errno.h>
49464+#include <asm/mman.h>
49465+#include <net/sock.h>
49466+#include <linux/file.h>
49467+#include <linux/fs.h>
49468+#include <linux/net.h>
49469+#include <linux/in.h>
49470+#include <linux/slab.h>
49471+#include <linux/types.h>
49472+#include <linux/sched.h>
49473+#include <linux/timer.h>
49474+#include <linux/gracl.h>
49475+#include <linux/grsecurity.h>
49476+#include <linux/grinternal.h>
49477+
49478+static struct crash_uid *uid_set;
49479+static unsigned short uid_used;
49480+static DEFINE_SPINLOCK(gr_uid_lock);
49481+extern rwlock_t gr_inode_lock;
49482+extern struct acl_subject_label *
49483+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
49484+ struct acl_role_label *role);
49485+
49486+#ifdef CONFIG_BTRFS_FS
49487+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
49488+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
49489+#endif
49490+
49491+static inline dev_t __get_dev(const struct dentry *dentry)
49492+{
49493+#ifdef CONFIG_BTRFS_FS
49494+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
49495+ return get_btrfs_dev_from_inode(dentry->d_inode);
49496+ else
49497+#endif
49498+ return dentry->d_inode->i_sb->s_dev;
49499+}
49500+
49501+int
49502+gr_init_uidset(void)
49503+{
49504+ uid_set =
49505+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
49506+ uid_used = 0;
49507+
49508+ return uid_set ? 1 : 0;
49509+}
49510+
49511+void
49512+gr_free_uidset(void)
49513+{
49514+ if (uid_set)
49515+ kfree(uid_set);
49516+
49517+ return;
49518+}
49519+
49520+int
49521+gr_find_uid(const uid_t uid)
49522+{
49523+ struct crash_uid *tmp = uid_set;
49524+ uid_t buid;
49525+ int low = 0, high = uid_used - 1, mid;
49526+
49527+ while (high >= low) {
49528+ mid = (low + high) >> 1;
49529+ buid = tmp[mid].uid;
49530+ if (buid == uid)
49531+ return mid;
49532+ if (buid > uid)
49533+ high = mid - 1;
49534+ if (buid < uid)
49535+ low = mid + 1;
49536+ }
49537+
49538+ return -1;
49539+}
49540+
49541+static __inline__ void
49542+gr_insertsort(void)
49543+{
49544+ unsigned short i, j;
49545+ struct crash_uid index;
49546+
49547+ for (i = 1; i < uid_used; i++) {
49548+ index = uid_set[i];
49549+ j = i;
49550+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
49551+ uid_set[j] = uid_set[j - 1];
49552+ j--;
49553+ }
49554+ uid_set[j] = index;
49555+ }
49556+
49557+ return;
49558+}
49559+
49560+static __inline__ void
49561+gr_insert_uid(const uid_t uid, const unsigned long expires)
49562+{
49563+ int loc;
49564+
49565+ if (uid_used == GR_UIDTABLE_MAX)
49566+ return;
49567+
49568+ loc = gr_find_uid(uid);
49569+
49570+ if (loc >= 0) {
49571+ uid_set[loc].expires = expires;
49572+ return;
49573+ }
49574+
49575+ uid_set[uid_used].uid = uid;
49576+ uid_set[uid_used].expires = expires;
49577+ uid_used++;
49578+
49579+ gr_insertsort();
49580+
49581+ return;
49582+}
49583+
49584+void
49585+gr_remove_uid(const unsigned short loc)
49586+{
49587+ unsigned short i;
49588+
49589+ for (i = loc + 1; i < uid_used; i++)
49590+ uid_set[i - 1] = uid_set[i];
49591+
49592+ uid_used--;
49593+
49594+ return;
49595+}
49596+
49597+int
49598+gr_check_crash_uid(const uid_t uid)
49599+{
49600+ int loc;
49601+ int ret = 0;
49602+
49603+ if (unlikely(!gr_acl_is_enabled()))
49604+ return 0;
49605+
49606+ spin_lock(&gr_uid_lock);
49607+ loc = gr_find_uid(uid);
49608+
49609+ if (loc < 0)
49610+ goto out_unlock;
49611+
49612+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
49613+ gr_remove_uid(loc);
49614+ else
49615+ ret = 1;
49616+
49617+out_unlock:
49618+ spin_unlock(&gr_uid_lock);
49619+ return ret;
49620+}
49621+
49622+static __inline__ int
49623+proc_is_setxid(const struct cred *cred)
49624+{
49625+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
49626+ cred->uid != cred->fsuid)
49627+ return 1;
49628+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
49629+ cred->gid != cred->fsgid)
49630+ return 1;
49631+
49632+ return 0;
49633+}
49634+
49635+extern int gr_fake_force_sig(int sig, struct task_struct *t);
49636+
49637+void
49638+gr_handle_crash(struct task_struct *task, const int sig)
49639+{
49640+ struct acl_subject_label *curr;
49641+ struct acl_subject_label *curr2;
49642+ struct task_struct *tsk, *tsk2;
49643+ const struct cred *cred;
49644+ const struct cred *cred2;
49645+
49646+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
49647+ return;
49648+
49649+ if (unlikely(!gr_acl_is_enabled()))
49650+ return;
49651+
49652+ curr = task->acl;
49653+
49654+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
49655+ return;
49656+
49657+ if (time_before_eq(curr->expires, get_seconds())) {
49658+ curr->expires = 0;
49659+ curr->crashes = 0;
49660+ }
49661+
49662+ curr->crashes++;
49663+
49664+ if (!curr->expires)
49665+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
49666+
49667+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
49668+ time_after(curr->expires, get_seconds())) {
49669+ rcu_read_lock();
49670+ cred = __task_cred(task);
49671+ if (cred->uid && proc_is_setxid(cred)) {
49672+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
49673+ spin_lock(&gr_uid_lock);
49674+ gr_insert_uid(cred->uid, curr->expires);
49675+ spin_unlock(&gr_uid_lock);
49676+ curr->expires = 0;
49677+ curr->crashes = 0;
49678+ read_lock(&tasklist_lock);
49679+ do_each_thread(tsk2, tsk) {
49680+ cred2 = __task_cred(tsk);
49681+ if (tsk != task && cred2->uid == cred->uid)
49682+ gr_fake_force_sig(SIGKILL, tsk);
49683+ } while_each_thread(tsk2, tsk);
49684+ read_unlock(&tasklist_lock);
49685+ } else {
49686+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
49687+ read_lock(&tasklist_lock);
49688+ do_each_thread(tsk2, tsk) {
49689+ if (likely(tsk != task)) {
49690+ curr2 = tsk->acl;
49691+
49692+ if (curr2->device == curr->device &&
49693+ curr2->inode == curr->inode)
49694+ gr_fake_force_sig(SIGKILL, tsk);
49695+ }
49696+ } while_each_thread(tsk2, tsk);
49697+ read_unlock(&tasklist_lock);
49698+ }
49699+ rcu_read_unlock();
49700+ }
49701+
49702+ return;
49703+}
49704+
49705+int
49706+gr_check_crash_exec(const struct file *filp)
49707+{
49708+ struct acl_subject_label *curr;
49709+
49710+ if (unlikely(!gr_acl_is_enabled()))
49711+ return 0;
49712+
49713+ read_lock(&gr_inode_lock);
49714+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
49715+ __get_dev(filp->f_path.dentry),
49716+ current->role);
49717+ read_unlock(&gr_inode_lock);
49718+
49719+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
49720+ (!curr->crashes && !curr->expires))
49721+ return 0;
49722+
49723+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
49724+ time_after(curr->expires, get_seconds()))
49725+ return 1;
49726+ else if (time_before_eq(curr->expires, get_seconds())) {
49727+ curr->crashes = 0;
49728+ curr->expires = 0;
49729+ }
49730+
49731+ return 0;
49732+}
49733+
49734+void
49735+gr_handle_alertkill(struct task_struct *task)
49736+{
49737+ struct acl_subject_label *curracl;
49738+ __u32 curr_ip;
49739+ struct task_struct *p, *p2;
49740+
49741+ if (unlikely(!gr_acl_is_enabled()))
49742+ return;
49743+
49744+ curracl = task->acl;
49745+ curr_ip = task->signal->curr_ip;
49746+
49747+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
49748+ read_lock(&tasklist_lock);
49749+ do_each_thread(p2, p) {
49750+ if (p->signal->curr_ip == curr_ip)
49751+ gr_fake_force_sig(SIGKILL, p);
49752+ } while_each_thread(p2, p);
49753+ read_unlock(&tasklist_lock);
49754+ } else if (curracl->mode & GR_KILLPROC)
49755+ gr_fake_force_sig(SIGKILL, task);
49756+
49757+ return;
49758+}
49759diff -urNp linux-3.0.4/grsecurity/gracl_shm.c linux-3.0.4/grsecurity/gracl_shm.c
49760--- linux-3.0.4/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
49761+++ linux-3.0.4/grsecurity/gracl_shm.c 2011-08-23 21:48:14.000000000 -0400
49762@@ -0,0 +1,40 @@
49763+#include <linux/kernel.h>
49764+#include <linux/mm.h>
49765+#include <linux/sched.h>
49766+#include <linux/file.h>
49767+#include <linux/ipc.h>
49768+#include <linux/gracl.h>
49769+#include <linux/grsecurity.h>
49770+#include <linux/grinternal.h>
49771+
49772+int
49773+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
49774+ const time_t shm_createtime, const uid_t cuid, const int shmid)
49775+{
49776+ struct task_struct *task;
49777+
49778+ if (!gr_acl_is_enabled())
49779+ return 1;
49780+
49781+ rcu_read_lock();
49782+ read_lock(&tasklist_lock);
49783+
49784+ task = find_task_by_vpid(shm_cprid);
49785+
49786+ if (unlikely(!task))
49787+ task = find_task_by_vpid(shm_lapid);
49788+
49789+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
49790+ (task->pid == shm_lapid)) &&
49791+ (task->acl->mode & GR_PROTSHM) &&
49792+ (task->acl != current->acl))) {
49793+ read_unlock(&tasklist_lock);
49794+ rcu_read_unlock();
49795+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
49796+ return 0;
49797+ }
49798+ read_unlock(&tasklist_lock);
49799+ rcu_read_unlock();
49800+
49801+ return 1;
49802+}
49803diff -urNp linux-3.0.4/grsecurity/grsec_chdir.c linux-3.0.4/grsecurity/grsec_chdir.c
49804--- linux-3.0.4/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
49805+++ linux-3.0.4/grsecurity/grsec_chdir.c 2011-08-23 21:48:14.000000000 -0400
49806@@ -0,0 +1,19 @@
49807+#include <linux/kernel.h>
49808+#include <linux/sched.h>
49809+#include <linux/fs.h>
49810+#include <linux/file.h>
49811+#include <linux/grsecurity.h>
49812+#include <linux/grinternal.h>
49813+
49814+void
49815+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
49816+{
49817+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
49818+ if ((grsec_enable_chdir && grsec_enable_group &&
49819+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
49820+ !grsec_enable_group)) {
49821+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
49822+ }
49823+#endif
49824+ return;
49825+}
49826diff -urNp linux-3.0.4/grsecurity/grsec_chroot.c linux-3.0.4/grsecurity/grsec_chroot.c
49827--- linux-3.0.4/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
49828+++ linux-3.0.4/grsecurity/grsec_chroot.c 2011-08-23 21:48:14.000000000 -0400
49829@@ -0,0 +1,349 @@
49830+#include <linux/kernel.h>
49831+#include <linux/module.h>
49832+#include <linux/sched.h>
49833+#include <linux/file.h>
49834+#include <linux/fs.h>
49835+#include <linux/mount.h>
49836+#include <linux/types.h>
49837+#include <linux/pid_namespace.h>
49838+#include <linux/grsecurity.h>
49839+#include <linux/grinternal.h>
49840+
49841+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
49842+{
49843+#ifdef CONFIG_GRKERNSEC
49844+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
49845+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
49846+ task->gr_is_chrooted = 1;
49847+ else
49848+ task->gr_is_chrooted = 0;
49849+
49850+ task->gr_chroot_dentry = path->dentry;
49851+#endif
49852+ return;
49853+}
49854+
49855+void gr_clear_chroot_entries(struct task_struct *task)
49856+{
49857+#ifdef CONFIG_GRKERNSEC
49858+ task->gr_is_chrooted = 0;
49859+ task->gr_chroot_dentry = NULL;
49860+#endif
49861+ return;
49862+}
49863+
49864+int
49865+gr_handle_chroot_unix(const pid_t pid)
49866+{
49867+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
49868+ struct task_struct *p;
49869+
49870+ if (unlikely(!grsec_enable_chroot_unix))
49871+ return 1;
49872+
49873+ if (likely(!proc_is_chrooted(current)))
49874+ return 1;
49875+
49876+ rcu_read_lock();
49877+ read_lock(&tasklist_lock);
49878+ p = find_task_by_vpid_unrestricted(pid);
49879+ if (unlikely(p && !have_same_root(current, p))) {
49880+ read_unlock(&tasklist_lock);
49881+ rcu_read_unlock();
49882+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
49883+ return 0;
49884+ }
49885+ read_unlock(&tasklist_lock);
49886+ rcu_read_unlock();
49887+#endif
49888+ return 1;
49889+}
49890+
49891+int
49892+gr_handle_chroot_nice(void)
49893+{
49894+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49895+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
49896+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
49897+ return -EPERM;
49898+ }
49899+#endif
49900+ return 0;
49901+}
49902+
49903+int
49904+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
49905+{
49906+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49907+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
49908+ && proc_is_chrooted(current)) {
49909+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
49910+ return -EACCES;
49911+ }
49912+#endif
49913+ return 0;
49914+}
49915+
49916+int
49917+gr_handle_chroot_rawio(const struct inode *inode)
49918+{
49919+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49920+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
49921+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
49922+ return 1;
49923+#endif
49924+ return 0;
49925+}
49926+
49927+int
49928+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
49929+{
49930+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49931+ struct task_struct *p;
49932+ int ret = 0;
49933+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
49934+ return ret;
49935+
49936+ read_lock(&tasklist_lock);
49937+ do_each_pid_task(pid, type, p) {
49938+ if (!have_same_root(current, p)) {
49939+ ret = 1;
49940+ goto out;
49941+ }
49942+ } while_each_pid_task(pid, type, p);
49943+out:
49944+ read_unlock(&tasklist_lock);
49945+ return ret;
49946+#endif
49947+ return 0;
49948+}
49949+
49950+int
49951+gr_pid_is_chrooted(struct task_struct *p)
49952+{
49953+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49954+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
49955+ return 0;
49956+
49957+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
49958+ !have_same_root(current, p)) {
49959+ return 1;
49960+ }
49961+#endif
49962+ return 0;
49963+}
49964+
49965+EXPORT_SYMBOL(gr_pid_is_chrooted);
49966+
49967+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
49968+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
49969+{
49970+ struct path path, currentroot;
49971+ int ret = 0;
49972+
49973+ path.dentry = (struct dentry *)u_dentry;
49974+ path.mnt = (struct vfsmount *)u_mnt;
49975+ get_fs_root(current->fs, &currentroot);
49976+ if (path_is_under(&path, &currentroot))
49977+ ret = 1;
49978+ path_put(&currentroot);
49979+
49980+ return ret;
49981+}
49982+#endif
49983+
49984+int
49985+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
49986+{
49987+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
49988+ if (!grsec_enable_chroot_fchdir)
49989+ return 1;
49990+
49991+ if (!proc_is_chrooted(current))
49992+ return 1;
49993+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
49994+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
49995+ return 0;
49996+ }
49997+#endif
49998+ return 1;
49999+}
50000+
50001+int
50002+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
50003+ const time_t shm_createtime)
50004+{
50005+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
50006+ struct task_struct *p;
50007+ time_t starttime;
50008+
50009+ if (unlikely(!grsec_enable_chroot_shmat))
50010+ return 1;
50011+
50012+ if (likely(!proc_is_chrooted(current)))
50013+ return 1;
50014+
50015+ rcu_read_lock();
50016+ read_lock(&tasklist_lock);
50017+
50018+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
50019+ starttime = p->start_time.tv_sec;
50020+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
50021+ if (have_same_root(current, p)) {
50022+ goto allow;
50023+ } else {
50024+ read_unlock(&tasklist_lock);
50025+ rcu_read_unlock();
50026+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
50027+ return 0;
50028+ }
50029+ }
50030+ /* creator exited, pid reuse, fall through to next check */
50031+ }
50032+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
50033+ if (unlikely(!have_same_root(current, p))) {
50034+ read_unlock(&tasklist_lock);
50035+ rcu_read_unlock();
50036+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
50037+ return 0;
50038+ }
50039+ }
50040+
50041+allow:
50042+ read_unlock(&tasklist_lock);
50043+ rcu_read_unlock();
50044+#endif
50045+ return 1;
50046+}
50047+
50048+void
50049+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
50050+{
50051+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
50052+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
50053+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
50054+#endif
50055+ return;
50056+}
50057+
50058+int
50059+gr_handle_chroot_mknod(const struct dentry *dentry,
50060+ const struct vfsmount *mnt, const int mode)
50061+{
50062+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
50063+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
50064+ proc_is_chrooted(current)) {
50065+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
50066+ return -EPERM;
50067+ }
50068+#endif
50069+ return 0;
50070+}
50071+
50072+int
50073+gr_handle_chroot_mount(const struct dentry *dentry,
50074+ const struct vfsmount *mnt, const char *dev_name)
50075+{
50076+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
50077+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
50078+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
50079+ return -EPERM;
50080+ }
50081+#endif
50082+ return 0;
50083+}
50084+
50085+int
50086+gr_handle_chroot_pivot(void)
50087+{
50088+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
50089+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
50090+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
50091+ return -EPERM;
50092+ }
50093+#endif
50094+ return 0;
50095+}
50096+
50097+int
50098+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
50099+{
50100+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
50101+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
50102+ !gr_is_outside_chroot(dentry, mnt)) {
50103+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
50104+ return -EPERM;
50105+ }
50106+#endif
50107+ return 0;
50108+}
50109+
50110+int
50111+gr_handle_chroot_caps(struct path *path)
50112+{
50113+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50114+ if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
50115+ (init_task.fs->root.dentry != path->dentry) &&
50116+ (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
50117+
50118+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
50119+ const struct cred *old = current_cred();
50120+ struct cred *new = prepare_creds();
50121+ if (new == NULL)
50122+ return 1;
50123+
50124+ new->cap_permitted = cap_drop(old->cap_permitted,
50125+ chroot_caps);
50126+ new->cap_inheritable = cap_drop(old->cap_inheritable,
50127+ chroot_caps);
50128+ new->cap_effective = cap_drop(old->cap_effective,
50129+ chroot_caps);
50130+
50131+ commit_creds(new);
50132+
50133+ return 0;
50134+ }
50135+#endif
50136+ return 0;
50137+}
50138+
50139+int
50140+gr_handle_chroot_sysctl(const int op)
50141+{
50142+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
50143+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
50144+ proc_is_chrooted(current))
50145+ return -EACCES;
50146+#endif
50147+ return 0;
50148+}
50149+
50150+void
50151+gr_handle_chroot_chdir(struct path *path)
50152+{
50153+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
50154+ if (grsec_enable_chroot_chdir)
50155+ set_fs_pwd(current->fs, path);
50156+#endif
50157+ return;
50158+}
50159+
50160+int
50161+gr_handle_chroot_chmod(const struct dentry *dentry,
50162+ const struct vfsmount *mnt, const int mode)
50163+{
50164+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
50165+ /* allow chmod +s on directories, but not files */
50166+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
50167+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
50168+ proc_is_chrooted(current)) {
50169+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
50170+ return -EPERM;
50171+ }
50172+#endif
50173+ return 0;
50174+}
50175+
50176+#ifdef CONFIG_SECURITY
50177+EXPORT_SYMBOL(gr_handle_chroot_caps);
50178+#endif
50179diff -urNp linux-3.0.4/grsecurity/grsec_disabled.c linux-3.0.4/grsecurity/grsec_disabled.c
50180--- linux-3.0.4/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
50181+++ linux-3.0.4/grsecurity/grsec_disabled.c 2011-08-23 21:48:14.000000000 -0400
50182@@ -0,0 +1,447 @@
50183+#include <linux/kernel.h>
50184+#include <linux/module.h>
50185+#include <linux/sched.h>
50186+#include <linux/file.h>
50187+#include <linux/fs.h>
50188+#include <linux/kdev_t.h>
50189+#include <linux/net.h>
50190+#include <linux/in.h>
50191+#include <linux/ip.h>
50192+#include <linux/skbuff.h>
50193+#include <linux/sysctl.h>
50194+
50195+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
50196+void
50197+pax_set_initial_flags(struct linux_binprm *bprm)
50198+{
50199+ return;
50200+}
50201+#endif
50202+
50203+#ifdef CONFIG_SYSCTL
50204+__u32
50205+gr_handle_sysctl(const struct ctl_table * table, const int op)
50206+{
50207+ return 0;
50208+}
50209+#endif
50210+
50211+#ifdef CONFIG_TASKSTATS
50212+int gr_is_taskstats_denied(int pid)
50213+{
50214+ return 0;
50215+}
50216+#endif
50217+
50218+int
50219+gr_acl_is_enabled(void)
50220+{
50221+ return 0;
50222+}
50223+
50224+int
50225+gr_handle_rawio(const struct inode *inode)
50226+{
50227+ return 0;
50228+}
50229+
50230+void
50231+gr_acl_handle_psacct(struct task_struct *task, const long code)
50232+{
50233+ return;
50234+}
50235+
50236+int
50237+gr_handle_ptrace(struct task_struct *task, const long request)
50238+{
50239+ return 0;
50240+}
50241+
50242+int
50243+gr_handle_proc_ptrace(struct task_struct *task)
50244+{
50245+ return 0;
50246+}
50247+
50248+void
50249+gr_learn_resource(const struct task_struct *task,
50250+ const int res, const unsigned long wanted, const int gt)
50251+{
50252+ return;
50253+}
50254+
50255+int
50256+gr_set_acls(const int type)
50257+{
50258+ return 0;
50259+}
50260+
50261+int
50262+gr_check_hidden_task(const struct task_struct *tsk)
50263+{
50264+ return 0;
50265+}
50266+
50267+int
50268+gr_check_protected_task(const struct task_struct *task)
50269+{
50270+ return 0;
50271+}
50272+
50273+int
50274+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
50275+{
50276+ return 0;
50277+}
50278+
50279+void
50280+gr_copy_label(struct task_struct *tsk)
50281+{
50282+ return;
50283+}
50284+
50285+void
50286+gr_set_pax_flags(struct task_struct *task)
50287+{
50288+ return;
50289+}
50290+
50291+int
50292+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
50293+ const int unsafe_share)
50294+{
50295+ return 0;
50296+}
50297+
50298+void
50299+gr_handle_delete(const ino_t ino, const dev_t dev)
50300+{
50301+ return;
50302+}
50303+
50304+void
50305+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
50306+{
50307+ return;
50308+}
50309+
50310+void
50311+gr_handle_crash(struct task_struct *task, const int sig)
50312+{
50313+ return;
50314+}
50315+
50316+int
50317+gr_check_crash_exec(const struct file *filp)
50318+{
50319+ return 0;
50320+}
50321+
50322+int
50323+gr_check_crash_uid(const uid_t uid)
50324+{
50325+ return 0;
50326+}
50327+
50328+void
50329+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
50330+ struct dentry *old_dentry,
50331+ struct dentry *new_dentry,
50332+ struct vfsmount *mnt, const __u8 replace)
50333+{
50334+ return;
50335+}
50336+
50337+int
50338+gr_search_socket(const int family, const int type, const int protocol)
50339+{
50340+ return 1;
50341+}
50342+
50343+int
50344+gr_search_connectbind(const int mode, const struct socket *sock,
50345+ const struct sockaddr_in *addr)
50346+{
50347+ return 0;
50348+}
50349+
50350+int
50351+gr_is_capable(const int cap)
50352+{
50353+ return 1;
50354+}
50355+
50356+int
50357+gr_is_capable_nolog(const int cap)
50358+{
50359+ return 1;
50360+}
50361+
50362+void
50363+gr_handle_alertkill(struct task_struct *task)
50364+{
50365+ return;
50366+}
50367+
50368+__u32
50369+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
50370+{
50371+ return 1;
50372+}
50373+
50374+__u32
50375+gr_acl_handle_hidden_file(const struct dentry * dentry,
50376+ const struct vfsmount * mnt)
50377+{
50378+ return 1;
50379+}
50380+
50381+__u32
50382+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
50383+ const int fmode)
50384+{
50385+ return 1;
50386+}
50387+
50388+__u32
50389+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
50390+{
50391+ return 1;
50392+}
50393+
50394+__u32
50395+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
50396+{
50397+ return 1;
50398+}
50399+
50400+int
50401+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
50402+ unsigned int *vm_flags)
50403+{
50404+ return 1;
50405+}
50406+
50407+__u32
50408+gr_acl_handle_truncate(const struct dentry * dentry,
50409+ const struct vfsmount * mnt)
50410+{
50411+ return 1;
50412+}
50413+
50414+__u32
50415+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
50416+{
50417+ return 1;
50418+}
50419+
50420+__u32
50421+gr_acl_handle_access(const struct dentry * dentry,
50422+ const struct vfsmount * mnt, const int fmode)
50423+{
50424+ return 1;
50425+}
50426+
50427+__u32
50428+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
50429+ mode_t mode)
50430+{
50431+ return 1;
50432+}
50433+
50434+__u32
50435+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
50436+ mode_t mode)
50437+{
50438+ return 1;
50439+}
50440+
50441+__u32
50442+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
50443+{
50444+ return 1;
50445+}
50446+
50447+__u32
50448+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
50449+{
50450+ return 1;
50451+}
50452+
50453+void
50454+grsecurity_init(void)
50455+{
50456+ return;
50457+}
50458+
50459+__u32
50460+gr_acl_handle_mknod(const struct dentry * new_dentry,
50461+ const struct dentry * parent_dentry,
50462+ const struct vfsmount * parent_mnt,
50463+ const int mode)
50464+{
50465+ return 1;
50466+}
50467+
50468+__u32
50469+gr_acl_handle_mkdir(const struct dentry * new_dentry,
50470+ const struct dentry * parent_dentry,
50471+ const struct vfsmount * parent_mnt)
50472+{
50473+ return 1;
50474+}
50475+
50476+__u32
50477+gr_acl_handle_symlink(const struct dentry * new_dentry,
50478+ const struct dentry * parent_dentry,
50479+ const struct vfsmount * parent_mnt, const char *from)
50480+{
50481+ return 1;
50482+}
50483+
50484+__u32
50485+gr_acl_handle_link(const struct dentry * new_dentry,
50486+ const struct dentry * parent_dentry,
50487+ const struct vfsmount * parent_mnt,
50488+ const struct dentry * old_dentry,
50489+ const struct vfsmount * old_mnt, const char *to)
50490+{
50491+ return 1;
50492+}
50493+
50494+int
50495+gr_acl_handle_rename(const struct dentry *new_dentry,
50496+ const struct dentry *parent_dentry,
50497+ const struct vfsmount *parent_mnt,
50498+ const struct dentry *old_dentry,
50499+ const struct inode *old_parent_inode,
50500+ const struct vfsmount *old_mnt, const char *newname)
50501+{
50502+ return 0;
50503+}
50504+
50505+int
50506+gr_acl_handle_filldir(const struct file *file, const char *name,
50507+ const int namelen, const ino_t ino)
50508+{
50509+ return 1;
50510+}
50511+
50512+int
50513+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
50514+ const time_t shm_createtime, const uid_t cuid, const int shmid)
50515+{
50516+ return 1;
50517+}
50518+
50519+int
50520+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
50521+{
50522+ return 0;
50523+}
50524+
50525+int
50526+gr_search_accept(const struct socket *sock)
50527+{
50528+ return 0;
50529+}
50530+
50531+int
50532+gr_search_listen(const struct socket *sock)
50533+{
50534+ return 0;
50535+}
50536+
50537+int
50538+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
50539+{
50540+ return 0;
50541+}
50542+
50543+__u32
50544+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
50545+{
50546+ return 1;
50547+}
50548+
50549+__u32
50550+gr_acl_handle_creat(const struct dentry * dentry,
50551+ const struct dentry * p_dentry,
50552+ const struct vfsmount * p_mnt, const int fmode,
50553+ const int imode)
50554+{
50555+ return 1;
50556+}
50557+
50558+void
50559+gr_acl_handle_exit(void)
50560+{
50561+ return;
50562+}
50563+
50564+int
50565+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
50566+{
50567+ return 1;
50568+}
50569+
50570+void
50571+gr_set_role_label(const uid_t uid, const gid_t gid)
50572+{
50573+ return;
50574+}
50575+
50576+int
50577+gr_acl_handle_procpidmem(const struct task_struct *task)
50578+{
50579+ return 0;
50580+}
50581+
50582+int
50583+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
50584+{
50585+ return 0;
50586+}
50587+
50588+int
50589+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
50590+{
50591+ return 0;
50592+}
50593+
50594+void
50595+gr_set_kernel_label(struct task_struct *task)
50596+{
50597+ return;
50598+}
50599+
50600+int
50601+gr_check_user_change(int real, int effective, int fs)
50602+{
50603+ return 0;
50604+}
50605+
50606+int
50607+gr_check_group_change(int real, int effective, int fs)
50608+{
50609+ return 0;
50610+}
50611+
50612+int gr_acl_enable_at_secure(void)
50613+{
50614+ return 0;
50615+}
50616+
50617+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
50618+{
50619+ return dentry->d_inode->i_sb->s_dev;
50620+}
50621+
50622+EXPORT_SYMBOL(gr_is_capable);
50623+EXPORT_SYMBOL(gr_is_capable_nolog);
50624+EXPORT_SYMBOL(gr_learn_resource);
50625+EXPORT_SYMBOL(gr_set_kernel_label);
50626+#ifdef CONFIG_SECURITY
50627+EXPORT_SYMBOL(gr_check_user_change);
50628+EXPORT_SYMBOL(gr_check_group_change);
50629+#endif
50630diff -urNp linux-3.0.4/grsecurity/grsec_exec.c linux-3.0.4/grsecurity/grsec_exec.c
50631--- linux-3.0.4/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
50632+++ linux-3.0.4/grsecurity/grsec_exec.c 2011-08-25 17:25:59.000000000 -0400
50633@@ -0,0 +1,72 @@
50634+#include <linux/kernel.h>
50635+#include <linux/sched.h>
50636+#include <linux/file.h>
50637+#include <linux/binfmts.h>
50638+#include <linux/fs.h>
50639+#include <linux/types.h>
50640+#include <linux/grdefs.h>
50641+#include <linux/grsecurity.h>
50642+#include <linux/grinternal.h>
50643+#include <linux/capability.h>
50644+
50645+#include <asm/uaccess.h>
50646+
50647+#ifdef CONFIG_GRKERNSEC_EXECLOG
50648+static char gr_exec_arg_buf[132];
50649+static DEFINE_MUTEX(gr_exec_arg_mutex);
50650+#endif
50651+
50652+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
50653+
50654+void
50655+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
50656+{
50657+#ifdef CONFIG_GRKERNSEC_EXECLOG
50658+ char *grarg = gr_exec_arg_buf;
50659+ unsigned int i, x, execlen = 0;
50660+ char c;
50661+
50662+ if (!((grsec_enable_execlog && grsec_enable_group &&
50663+ in_group_p(grsec_audit_gid))
50664+ || (grsec_enable_execlog && !grsec_enable_group)))
50665+ return;
50666+
50667+ mutex_lock(&gr_exec_arg_mutex);
50668+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
50669+
50670+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
50671+ const char __user *p;
50672+ unsigned int len;
50673+
50674+ p = get_user_arg_ptr(argv, i);
50675+ if (IS_ERR(p))
50676+ goto log;
50677+
50678+ len = strnlen_user(p, 128 - execlen);
50679+ if (len > 128 - execlen)
50680+ len = 128 - execlen;
50681+ else if (len > 0)
50682+ len--;
50683+ if (copy_from_user(grarg + execlen, p, len))
50684+ goto log;
50685+
50686+ /* rewrite unprintable characters */
50687+ for (x = 0; x < len; x++) {
50688+ c = *(grarg + execlen + x);
50689+ if (c < 32 || c > 126)
50690+ *(grarg + execlen + x) = ' ';
50691+ }
50692+
50693+ execlen += len;
50694+ *(grarg + execlen) = ' ';
50695+ *(grarg + execlen + 1) = '\0';
50696+ execlen++;
50697+ }
50698+
50699+ log:
50700+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
50701+ bprm->file->f_path.mnt, grarg);
50702+ mutex_unlock(&gr_exec_arg_mutex);
50703+#endif
50704+ return;
50705+}
50706diff -urNp linux-3.0.4/grsecurity/grsec_fifo.c linux-3.0.4/grsecurity/grsec_fifo.c
50707--- linux-3.0.4/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
50708+++ linux-3.0.4/grsecurity/grsec_fifo.c 2011-08-23 21:48:14.000000000 -0400
50709@@ -0,0 +1,24 @@
50710+#include <linux/kernel.h>
50711+#include <linux/sched.h>
50712+#include <linux/fs.h>
50713+#include <linux/file.h>
50714+#include <linux/grinternal.h>
50715+
50716+int
50717+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
50718+ const struct dentry *dir, const int flag, const int acc_mode)
50719+{
50720+#ifdef CONFIG_GRKERNSEC_FIFO
50721+ const struct cred *cred = current_cred();
50722+
50723+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
50724+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
50725+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
50726+ (cred->fsuid != dentry->d_inode->i_uid)) {
50727+ if (!inode_permission(dentry->d_inode, acc_mode))
50728+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
50729+ return -EACCES;
50730+ }
50731+#endif
50732+ return 0;
50733+}
50734diff -urNp linux-3.0.4/grsecurity/grsec_fork.c linux-3.0.4/grsecurity/grsec_fork.c
50735--- linux-3.0.4/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
50736+++ linux-3.0.4/grsecurity/grsec_fork.c 2011-08-23 21:48:14.000000000 -0400
50737@@ -0,0 +1,23 @@
50738+#include <linux/kernel.h>
50739+#include <linux/sched.h>
50740+#include <linux/grsecurity.h>
50741+#include <linux/grinternal.h>
50742+#include <linux/errno.h>
50743+
50744+void
50745+gr_log_forkfail(const int retval)
50746+{
50747+#ifdef CONFIG_GRKERNSEC_FORKFAIL
50748+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
50749+ switch (retval) {
50750+ case -EAGAIN:
50751+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
50752+ break;
50753+ case -ENOMEM:
50754+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
50755+ break;
50756+ }
50757+ }
50758+#endif
50759+ return;
50760+}
50761diff -urNp linux-3.0.4/grsecurity/grsec_init.c linux-3.0.4/grsecurity/grsec_init.c
50762--- linux-3.0.4/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
50763+++ linux-3.0.4/grsecurity/grsec_init.c 2011-08-25 17:25:12.000000000 -0400
50764@@ -0,0 +1,269 @@
50765+#include <linux/kernel.h>
50766+#include <linux/sched.h>
50767+#include <linux/mm.h>
50768+#include <linux/gracl.h>
50769+#include <linux/slab.h>
50770+#include <linux/vmalloc.h>
50771+#include <linux/percpu.h>
50772+#include <linux/module.h>
50773+
50774+int grsec_enable_brute;
50775+int grsec_enable_link;
50776+int grsec_enable_dmesg;
50777+int grsec_enable_harden_ptrace;
50778+int grsec_enable_fifo;
50779+int grsec_enable_execlog;
50780+int grsec_enable_signal;
50781+int grsec_enable_forkfail;
50782+int grsec_enable_audit_ptrace;
50783+int grsec_enable_time;
50784+int grsec_enable_audit_textrel;
50785+int grsec_enable_group;
50786+int grsec_audit_gid;
50787+int grsec_enable_chdir;
50788+int grsec_enable_mount;
50789+int grsec_enable_rofs;
50790+int grsec_enable_chroot_findtask;
50791+int grsec_enable_chroot_mount;
50792+int grsec_enable_chroot_shmat;
50793+int grsec_enable_chroot_fchdir;
50794+int grsec_enable_chroot_double;
50795+int grsec_enable_chroot_pivot;
50796+int grsec_enable_chroot_chdir;
50797+int grsec_enable_chroot_chmod;
50798+int grsec_enable_chroot_mknod;
50799+int grsec_enable_chroot_nice;
50800+int grsec_enable_chroot_execlog;
50801+int grsec_enable_chroot_caps;
50802+int grsec_enable_chroot_sysctl;
50803+int grsec_enable_chroot_unix;
50804+int grsec_enable_tpe;
50805+int grsec_tpe_gid;
50806+int grsec_enable_blackhole;
50807+#ifdef CONFIG_IPV6_MODULE
50808+EXPORT_SYMBOL(grsec_enable_blackhole);
50809+#endif
50810+int grsec_lastack_retries;
50811+int grsec_enable_tpe_all;
50812+int grsec_enable_tpe_invert;
50813+int grsec_enable_socket_all;
50814+int grsec_socket_all_gid;
50815+int grsec_enable_socket_client;
50816+int grsec_socket_client_gid;
50817+int grsec_enable_socket_server;
50818+int grsec_socket_server_gid;
50819+int grsec_resource_logging;
50820+int grsec_disable_privio;
50821+int grsec_enable_log_rwxmaps;
50822+int grsec_lock;
50823+
50824+DEFINE_SPINLOCK(grsec_alert_lock);
50825+unsigned long grsec_alert_wtime = 0;
50826+unsigned long grsec_alert_fyet = 0;
50827+
50828+DEFINE_SPINLOCK(grsec_audit_lock);
50829+
50830+DEFINE_RWLOCK(grsec_exec_file_lock);
50831+
50832+char *gr_shared_page[4];
50833+
50834+char *gr_alert_log_fmt;
50835+char *gr_audit_log_fmt;
50836+char *gr_alert_log_buf;
50837+char *gr_audit_log_buf;
50838+
50839+extern struct gr_arg *gr_usermode;
50840+extern unsigned char *gr_system_salt;
50841+extern unsigned char *gr_system_sum;
50842+
50843+void __init
50844+grsecurity_init(void)
50845+{
50846+ int j;
50847+ /* create the per-cpu shared pages */
50848+
50849+#ifdef CONFIG_X86
50850+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
50851+#endif
50852+
50853+ for (j = 0; j < 4; j++) {
50854+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
50855+ if (gr_shared_page[j] == NULL) {
50856+ panic("Unable to allocate grsecurity shared page");
50857+ return;
50858+ }
50859+ }
50860+
50861+ /* allocate log buffers */
50862+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
50863+ if (!gr_alert_log_fmt) {
50864+ panic("Unable to allocate grsecurity alert log format buffer");
50865+ return;
50866+ }
50867+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
50868+ if (!gr_audit_log_fmt) {
50869+ panic("Unable to allocate grsecurity audit log format buffer");
50870+ return;
50871+ }
50872+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
50873+ if (!gr_alert_log_buf) {
50874+ panic("Unable to allocate grsecurity alert log buffer");
50875+ return;
50876+ }
50877+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
50878+ if (!gr_audit_log_buf) {
50879+ panic("Unable to allocate grsecurity audit log buffer");
50880+ return;
50881+ }
50882+
50883+ /* allocate memory for authentication structure */
50884+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
50885+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
50886+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
50887+
50888+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
50889+ panic("Unable to allocate grsecurity authentication structure");
50890+ return;
50891+ }
50892+
50893+
50894+#ifdef CONFIG_GRKERNSEC_IO
50895+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
50896+ grsec_disable_privio = 1;
50897+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
50898+ grsec_disable_privio = 1;
50899+#else
50900+ grsec_disable_privio = 0;
50901+#endif
50902+#endif
50903+
50904+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
50905+ /* for backward compatibility, tpe_invert always defaults to on if
50906+ enabled in the kernel
50907+ */
50908+ grsec_enable_tpe_invert = 1;
50909+#endif
50910+
50911+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
50912+#ifndef CONFIG_GRKERNSEC_SYSCTL
50913+ grsec_lock = 1;
50914+#endif
50915+
50916+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
50917+ grsec_enable_audit_textrel = 1;
50918+#endif
50919+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
50920+ grsec_enable_log_rwxmaps = 1;
50921+#endif
50922+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
50923+ grsec_enable_group = 1;
50924+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
50925+#endif
50926+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
50927+ grsec_enable_chdir = 1;
50928+#endif
50929+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50930+ grsec_enable_harden_ptrace = 1;
50931+#endif
50932+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
50933+ grsec_enable_mount = 1;
50934+#endif
50935+#ifdef CONFIG_GRKERNSEC_LINK
50936+ grsec_enable_link = 1;
50937+#endif
50938+#ifdef CONFIG_GRKERNSEC_BRUTE
50939+ grsec_enable_brute = 1;
50940+#endif
50941+#ifdef CONFIG_GRKERNSEC_DMESG
50942+ grsec_enable_dmesg = 1;
50943+#endif
50944+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
50945+ grsec_enable_blackhole = 1;
50946+ grsec_lastack_retries = 4;
50947+#endif
50948+#ifdef CONFIG_GRKERNSEC_FIFO
50949+ grsec_enable_fifo = 1;
50950+#endif
50951+#ifdef CONFIG_GRKERNSEC_EXECLOG
50952+ grsec_enable_execlog = 1;
50953+#endif
50954+#ifdef CONFIG_GRKERNSEC_SIGNAL
50955+ grsec_enable_signal = 1;
50956+#endif
50957+#ifdef CONFIG_GRKERNSEC_FORKFAIL
50958+ grsec_enable_forkfail = 1;
50959+#endif
50960+#ifdef CONFIG_GRKERNSEC_TIME
50961+ grsec_enable_time = 1;
50962+#endif
50963+#ifdef CONFIG_GRKERNSEC_RESLOG
50964+ grsec_resource_logging = 1;
50965+#endif
50966+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
50967+ grsec_enable_chroot_findtask = 1;
50968+#endif
50969+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
50970+ grsec_enable_chroot_unix = 1;
50971+#endif
50972+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
50973+ grsec_enable_chroot_mount = 1;
50974+#endif
50975+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
50976+ grsec_enable_chroot_fchdir = 1;
50977+#endif
50978+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
50979+ grsec_enable_chroot_shmat = 1;
50980+#endif
50981+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
50982+ grsec_enable_audit_ptrace = 1;
50983+#endif
50984+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
50985+ grsec_enable_chroot_double = 1;
50986+#endif
50987+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
50988+ grsec_enable_chroot_pivot = 1;
50989+#endif
50990+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
50991+ grsec_enable_chroot_chdir = 1;
50992+#endif
50993+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
50994+ grsec_enable_chroot_chmod = 1;
50995+#endif
50996+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
50997+ grsec_enable_chroot_mknod = 1;
50998+#endif
50999+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
51000+ grsec_enable_chroot_nice = 1;
51001+#endif
51002+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
51003+ grsec_enable_chroot_execlog = 1;
51004+#endif
51005+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
51006+ grsec_enable_chroot_caps = 1;
51007+#endif
51008+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
51009+ grsec_enable_chroot_sysctl = 1;
51010+#endif
51011+#ifdef CONFIG_GRKERNSEC_TPE
51012+ grsec_enable_tpe = 1;
51013+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
51014+#ifdef CONFIG_GRKERNSEC_TPE_ALL
51015+ grsec_enable_tpe_all = 1;
51016+#endif
51017+#endif
51018+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
51019+ grsec_enable_socket_all = 1;
51020+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
51021+#endif
51022+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
51023+ grsec_enable_socket_client = 1;
51024+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
51025+#endif
51026+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
51027+ grsec_enable_socket_server = 1;
51028+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
51029+#endif
51030+#endif
51031+
51032+ return;
51033+}
51034diff -urNp linux-3.0.4/grsecurity/grsec_link.c linux-3.0.4/grsecurity/grsec_link.c
51035--- linux-3.0.4/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
51036+++ linux-3.0.4/grsecurity/grsec_link.c 2011-08-23 21:48:14.000000000 -0400
51037@@ -0,0 +1,43 @@
51038+#include <linux/kernel.h>
51039+#include <linux/sched.h>
51040+#include <linux/fs.h>
51041+#include <linux/file.h>
51042+#include <linux/grinternal.h>
51043+
51044+int
51045+gr_handle_follow_link(const struct inode *parent,
51046+ const struct inode *inode,
51047+ const struct dentry *dentry, const struct vfsmount *mnt)
51048+{
51049+#ifdef CONFIG_GRKERNSEC_LINK
51050+ const struct cred *cred = current_cred();
51051+
51052+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
51053+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
51054+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
51055+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
51056+ return -EACCES;
51057+ }
51058+#endif
51059+ return 0;
51060+}
51061+
51062+int
51063+gr_handle_hardlink(const struct dentry *dentry,
51064+ const struct vfsmount *mnt,
51065+ struct inode *inode, const int mode, const char *to)
51066+{
51067+#ifdef CONFIG_GRKERNSEC_LINK
51068+ const struct cred *cred = current_cred();
51069+
51070+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
51071+ (!S_ISREG(mode) || (mode & S_ISUID) ||
51072+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
51073+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
51074+ !capable(CAP_FOWNER) && cred->uid) {
51075+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
51076+ return -EPERM;
51077+ }
51078+#endif
51079+ return 0;
51080+}
51081diff -urNp linux-3.0.4/grsecurity/grsec_log.c linux-3.0.4/grsecurity/grsec_log.c
51082--- linux-3.0.4/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
51083+++ linux-3.0.4/grsecurity/grsec_log.c 2011-08-23 21:48:14.000000000 -0400
51084@@ -0,0 +1,310 @@
51085+#include <linux/kernel.h>
51086+#include <linux/sched.h>
51087+#include <linux/file.h>
51088+#include <linux/tty.h>
51089+#include <linux/fs.h>
51090+#include <linux/grinternal.h>
51091+
51092+#ifdef CONFIG_TREE_PREEMPT_RCU
51093+#define DISABLE_PREEMPT() preempt_disable()
51094+#define ENABLE_PREEMPT() preempt_enable()
51095+#else
51096+#define DISABLE_PREEMPT()
51097+#define ENABLE_PREEMPT()
51098+#endif
51099+
51100+#define BEGIN_LOCKS(x) \
51101+ DISABLE_PREEMPT(); \
51102+ rcu_read_lock(); \
51103+ read_lock(&tasklist_lock); \
51104+ read_lock(&grsec_exec_file_lock); \
51105+ if (x != GR_DO_AUDIT) \
51106+ spin_lock(&grsec_alert_lock); \
51107+ else \
51108+ spin_lock(&grsec_audit_lock)
51109+
51110+#define END_LOCKS(x) \
51111+ if (x != GR_DO_AUDIT) \
51112+ spin_unlock(&grsec_alert_lock); \
51113+ else \
51114+ spin_unlock(&grsec_audit_lock); \
51115+ read_unlock(&grsec_exec_file_lock); \
51116+ read_unlock(&tasklist_lock); \
51117+ rcu_read_unlock(); \
51118+ ENABLE_PREEMPT(); \
51119+ if (x == GR_DONT_AUDIT) \
51120+ gr_handle_alertkill(current)
51121+
51122+enum {
51123+ FLOODING,
51124+ NO_FLOODING
51125+};
51126+
51127+extern char *gr_alert_log_fmt;
51128+extern char *gr_audit_log_fmt;
51129+extern char *gr_alert_log_buf;
51130+extern char *gr_audit_log_buf;
51131+
51132+static int gr_log_start(int audit)
51133+{
51134+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
51135+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
51136+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51137+
51138+ if (audit == GR_DO_AUDIT)
51139+ goto set_fmt;
51140+
51141+ if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
51142+ grsec_alert_wtime = jiffies;
51143+ grsec_alert_fyet = 0;
51144+ } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
51145+ grsec_alert_fyet++;
51146+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
51147+ grsec_alert_wtime = jiffies;
51148+ grsec_alert_fyet++;
51149+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
51150+ return FLOODING;
51151+ } else return FLOODING;
51152+
51153+set_fmt:
51154+ memset(buf, 0, PAGE_SIZE);
51155+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
51156+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
51157+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
51158+ } else if (current->signal->curr_ip) {
51159+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
51160+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
51161+ } else if (gr_acl_is_enabled()) {
51162+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
51163+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
51164+ } else {
51165+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
51166+ strcpy(buf, fmt);
51167+ }
51168+
51169+ return NO_FLOODING;
51170+}
51171+
51172+static void gr_log_middle(int audit, const char *msg, va_list ap)
51173+ __attribute__ ((format (printf, 2, 0)));
51174+
51175+static void gr_log_middle(int audit, const char *msg, va_list ap)
51176+{
51177+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51178+ unsigned int len = strlen(buf);
51179+
51180+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
51181+
51182+ return;
51183+}
51184+
51185+static void gr_log_middle_varargs(int audit, const char *msg, ...)
51186+ __attribute__ ((format (printf, 2, 3)));
51187+
51188+static void gr_log_middle_varargs(int audit, const char *msg, ...)
51189+{
51190+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51191+ unsigned int len = strlen(buf);
51192+ va_list ap;
51193+
51194+ va_start(ap, msg);
51195+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
51196+ va_end(ap);
51197+
51198+ return;
51199+}
51200+
51201+static void gr_log_end(int audit)
51202+{
51203+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51204+ unsigned int len = strlen(buf);
51205+
51206+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
51207+ printk("%s\n", buf);
51208+
51209+ return;
51210+}
51211+
51212+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
51213+{
51214+ int logtype;
51215+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
51216+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
51217+ void *voidptr = NULL;
51218+ int num1 = 0, num2 = 0;
51219+ unsigned long ulong1 = 0, ulong2 = 0;
51220+ struct dentry *dentry = NULL;
51221+ struct vfsmount *mnt = NULL;
51222+ struct file *file = NULL;
51223+ struct task_struct *task = NULL;
51224+ const struct cred *cred, *pcred;
51225+ va_list ap;
51226+
51227+ BEGIN_LOCKS(audit);
51228+ logtype = gr_log_start(audit);
51229+ if (logtype == FLOODING) {
51230+ END_LOCKS(audit);
51231+ return;
51232+ }
51233+ va_start(ap, argtypes);
51234+ switch (argtypes) {
51235+ case GR_TTYSNIFF:
51236+ task = va_arg(ap, struct task_struct *);
51237+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
51238+ break;
51239+ case GR_SYSCTL_HIDDEN:
51240+ str1 = va_arg(ap, char *);
51241+ gr_log_middle_varargs(audit, msg, result, str1);
51242+ break;
51243+ case GR_RBAC:
51244+ dentry = va_arg(ap, struct dentry *);
51245+ mnt = va_arg(ap, struct vfsmount *);
51246+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
51247+ break;
51248+ case GR_RBAC_STR:
51249+ dentry = va_arg(ap, struct dentry *);
51250+ mnt = va_arg(ap, struct vfsmount *);
51251+ str1 = va_arg(ap, char *);
51252+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
51253+ break;
51254+ case GR_STR_RBAC:
51255+ str1 = va_arg(ap, char *);
51256+ dentry = va_arg(ap, struct dentry *);
51257+ mnt = va_arg(ap, struct vfsmount *);
51258+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
51259+ break;
51260+ case GR_RBAC_MODE2:
51261+ dentry = va_arg(ap, struct dentry *);
51262+ mnt = va_arg(ap, struct vfsmount *);
51263+ str1 = va_arg(ap, char *);
51264+ str2 = va_arg(ap, char *);
51265+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
51266+ break;
51267+ case GR_RBAC_MODE3:
51268+ dentry = va_arg(ap, struct dentry *);
51269+ mnt = va_arg(ap, struct vfsmount *);
51270+ str1 = va_arg(ap, char *);
51271+ str2 = va_arg(ap, char *);
51272+ str3 = va_arg(ap, char *);
51273+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
51274+ break;
51275+ case GR_FILENAME:
51276+ dentry = va_arg(ap, struct dentry *);
51277+ mnt = va_arg(ap, struct vfsmount *);
51278+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
51279+ break;
51280+ case GR_STR_FILENAME:
51281+ str1 = va_arg(ap, char *);
51282+ dentry = va_arg(ap, struct dentry *);
51283+ mnt = va_arg(ap, struct vfsmount *);
51284+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
51285+ break;
51286+ case GR_FILENAME_STR:
51287+ dentry = va_arg(ap, struct dentry *);
51288+ mnt = va_arg(ap, struct vfsmount *);
51289+ str1 = va_arg(ap, char *);
51290+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
51291+ break;
51292+ case GR_FILENAME_TWO_INT:
51293+ dentry = va_arg(ap, struct dentry *);
51294+ mnt = va_arg(ap, struct vfsmount *);
51295+ num1 = va_arg(ap, int);
51296+ num2 = va_arg(ap, int);
51297+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
51298+ break;
51299+ case GR_FILENAME_TWO_INT_STR:
51300+ dentry = va_arg(ap, struct dentry *);
51301+ mnt = va_arg(ap, struct vfsmount *);
51302+ num1 = va_arg(ap, int);
51303+ num2 = va_arg(ap, int);
51304+ str1 = va_arg(ap, char *);
51305+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
51306+ break;
51307+ case GR_TEXTREL:
51308+ file = va_arg(ap, struct file *);
51309+ ulong1 = va_arg(ap, unsigned long);
51310+ ulong2 = va_arg(ap, unsigned long);
51311+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
51312+ break;
51313+ case GR_PTRACE:
51314+ task = va_arg(ap, struct task_struct *);
51315+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
51316+ break;
51317+ case GR_RESOURCE:
51318+ task = va_arg(ap, struct task_struct *);
51319+ cred = __task_cred(task);
51320+ pcred = __task_cred(task->real_parent);
51321+ ulong1 = va_arg(ap, unsigned long);
51322+ str1 = va_arg(ap, char *);
51323+ ulong2 = va_arg(ap, unsigned long);
51324+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51325+ break;
51326+ case GR_CAP:
51327+ task = va_arg(ap, struct task_struct *);
51328+ cred = __task_cred(task);
51329+ pcred = __task_cred(task->real_parent);
51330+ str1 = va_arg(ap, char *);
51331+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51332+ break;
51333+ case GR_SIG:
51334+ str1 = va_arg(ap, char *);
51335+ voidptr = va_arg(ap, void *);
51336+ gr_log_middle_varargs(audit, msg, str1, voidptr);
51337+ break;
51338+ case GR_SIG2:
51339+ task = va_arg(ap, struct task_struct *);
51340+ cred = __task_cred(task);
51341+ pcred = __task_cred(task->real_parent);
51342+ num1 = va_arg(ap, int);
51343+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51344+ break;
51345+ case GR_CRASH1:
51346+ task = va_arg(ap, struct task_struct *);
51347+ cred = __task_cred(task);
51348+ pcred = __task_cred(task->real_parent);
51349+ ulong1 = va_arg(ap, unsigned long);
51350+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
51351+ break;
51352+ case GR_CRASH2:
51353+ task = va_arg(ap, struct task_struct *);
51354+ cred = __task_cred(task);
51355+ pcred = __task_cred(task->real_parent);
51356+ ulong1 = va_arg(ap, unsigned long);
51357+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
51358+ break;
51359+ case GR_RWXMAP:
51360+ file = va_arg(ap, struct file *);
51361+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
51362+ break;
51363+ case GR_PSACCT:
51364+ {
51365+ unsigned int wday, cday;
51366+ __u8 whr, chr;
51367+ __u8 wmin, cmin;
51368+ __u8 wsec, csec;
51369+ char cur_tty[64] = { 0 };
51370+ char parent_tty[64] = { 0 };
51371+
51372+ task = va_arg(ap, struct task_struct *);
51373+ wday = va_arg(ap, unsigned int);
51374+ cday = va_arg(ap, unsigned int);
51375+ whr = va_arg(ap, int);
51376+ chr = va_arg(ap, int);
51377+ wmin = va_arg(ap, int);
51378+ cmin = va_arg(ap, int);
51379+ wsec = va_arg(ap, int);
51380+ csec = va_arg(ap, int);
51381+ ulong1 = va_arg(ap, unsigned long);
51382+ cred = __task_cred(task);
51383+ pcred = __task_cred(task->real_parent);
51384+
51385+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51386+ }
51387+ break;
51388+ default:
51389+ gr_log_middle(audit, msg, ap);
51390+ }
51391+ va_end(ap);
51392+ gr_log_end(audit);
51393+ END_LOCKS(audit);
51394+}
51395diff -urNp linux-3.0.4/grsecurity/grsec_mem.c linux-3.0.4/grsecurity/grsec_mem.c
51396--- linux-3.0.4/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
51397+++ linux-3.0.4/grsecurity/grsec_mem.c 2011-08-23 21:48:14.000000000 -0400
51398@@ -0,0 +1,33 @@
51399+#include <linux/kernel.h>
51400+#include <linux/sched.h>
51401+#include <linux/mm.h>
51402+#include <linux/mman.h>
51403+#include <linux/grinternal.h>
51404+
51405+void
51406+gr_handle_ioperm(void)
51407+{
51408+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
51409+ return;
51410+}
51411+
51412+void
51413+gr_handle_iopl(void)
51414+{
51415+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
51416+ return;
51417+}
51418+
51419+void
51420+gr_handle_mem_readwrite(u64 from, u64 to)
51421+{
51422+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
51423+ return;
51424+}
51425+
51426+void
51427+gr_handle_vm86(void)
51428+{
51429+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
51430+ return;
51431+}
51432diff -urNp linux-3.0.4/grsecurity/grsec_mount.c linux-3.0.4/grsecurity/grsec_mount.c
51433--- linux-3.0.4/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
51434+++ linux-3.0.4/grsecurity/grsec_mount.c 2011-08-23 21:48:14.000000000 -0400
51435@@ -0,0 +1,62 @@
51436+#include <linux/kernel.h>
51437+#include <linux/sched.h>
51438+#include <linux/mount.h>
51439+#include <linux/grsecurity.h>
51440+#include <linux/grinternal.h>
51441+
51442+void
51443+gr_log_remount(const char *devname, const int retval)
51444+{
51445+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51446+ if (grsec_enable_mount && (retval >= 0))
51447+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
51448+#endif
51449+ return;
51450+}
51451+
51452+void
51453+gr_log_unmount(const char *devname, const int retval)
51454+{
51455+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51456+ if (grsec_enable_mount && (retval >= 0))
51457+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
51458+#endif
51459+ return;
51460+}
51461+
51462+void
51463+gr_log_mount(const char *from, const char *to, const int retval)
51464+{
51465+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51466+ if (grsec_enable_mount && (retval >= 0))
51467+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
51468+#endif
51469+ return;
51470+}
51471+
51472+int
51473+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
51474+{
51475+#ifdef CONFIG_GRKERNSEC_ROFS
51476+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
51477+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
51478+ return -EPERM;
51479+ } else
51480+ return 0;
51481+#endif
51482+ return 0;
51483+}
51484+
51485+int
51486+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
51487+{
51488+#ifdef CONFIG_GRKERNSEC_ROFS
51489+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
51490+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
51491+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
51492+ return -EPERM;
51493+ } else
51494+ return 0;
51495+#endif
51496+ return 0;
51497+}
51498diff -urNp linux-3.0.4/grsecurity/grsec_pax.c linux-3.0.4/grsecurity/grsec_pax.c
51499--- linux-3.0.4/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
51500+++ linux-3.0.4/grsecurity/grsec_pax.c 2011-08-23 21:48:14.000000000 -0400
51501@@ -0,0 +1,36 @@
51502+#include <linux/kernel.h>
51503+#include <linux/sched.h>
51504+#include <linux/mm.h>
51505+#include <linux/file.h>
51506+#include <linux/grinternal.h>
51507+#include <linux/grsecurity.h>
51508+
51509+void
51510+gr_log_textrel(struct vm_area_struct * vma)
51511+{
51512+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
51513+ if (grsec_enable_audit_textrel)
51514+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
51515+#endif
51516+ return;
51517+}
51518+
51519+void
51520+gr_log_rwxmmap(struct file *file)
51521+{
51522+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51523+ if (grsec_enable_log_rwxmaps)
51524+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
51525+#endif
51526+ return;
51527+}
51528+
51529+void
51530+gr_log_rwxmprotect(struct file *file)
51531+{
51532+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51533+ if (grsec_enable_log_rwxmaps)
51534+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
51535+#endif
51536+ return;
51537+}
51538diff -urNp linux-3.0.4/grsecurity/grsec_ptrace.c linux-3.0.4/grsecurity/grsec_ptrace.c
51539--- linux-3.0.4/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
51540+++ linux-3.0.4/grsecurity/grsec_ptrace.c 2011-08-23 21:48:14.000000000 -0400
51541@@ -0,0 +1,14 @@
51542+#include <linux/kernel.h>
51543+#include <linux/sched.h>
51544+#include <linux/grinternal.h>
51545+#include <linux/grsecurity.h>
51546+
51547+void
51548+gr_audit_ptrace(struct task_struct *task)
51549+{
51550+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
51551+ if (grsec_enable_audit_ptrace)
51552+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
51553+#endif
51554+ return;
51555+}
51556diff -urNp linux-3.0.4/grsecurity/grsec_sig.c linux-3.0.4/grsecurity/grsec_sig.c
51557--- linux-3.0.4/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
51558+++ linux-3.0.4/grsecurity/grsec_sig.c 2011-08-23 21:48:14.000000000 -0400
51559@@ -0,0 +1,206 @@
51560+#include <linux/kernel.h>
51561+#include <linux/sched.h>
51562+#include <linux/delay.h>
51563+#include <linux/grsecurity.h>
51564+#include <linux/grinternal.h>
51565+#include <linux/hardirq.h>
51566+
51567+char *signames[] = {
51568+ [SIGSEGV] = "Segmentation fault",
51569+ [SIGILL] = "Illegal instruction",
51570+ [SIGABRT] = "Abort",
51571+ [SIGBUS] = "Invalid alignment/Bus error"
51572+};
51573+
51574+void
51575+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
51576+{
51577+#ifdef CONFIG_GRKERNSEC_SIGNAL
51578+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
51579+ (sig == SIGABRT) || (sig == SIGBUS))) {
51580+ if (t->pid == current->pid) {
51581+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
51582+ } else {
51583+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
51584+ }
51585+ }
51586+#endif
51587+ return;
51588+}
51589+
51590+int
51591+gr_handle_signal(const struct task_struct *p, const int sig)
51592+{
51593+#ifdef CONFIG_GRKERNSEC
51594+ if (current->pid > 1 && gr_check_protected_task(p)) {
51595+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
51596+ return -EPERM;
51597+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
51598+ return -EPERM;
51599+ }
51600+#endif
51601+ return 0;
51602+}
51603+
51604+#ifdef CONFIG_GRKERNSEC
51605+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
51606+
51607+int gr_fake_force_sig(int sig, struct task_struct *t)
51608+{
51609+ unsigned long int flags;
51610+ int ret, blocked, ignored;
51611+ struct k_sigaction *action;
51612+
51613+ spin_lock_irqsave(&t->sighand->siglock, flags);
51614+ action = &t->sighand->action[sig-1];
51615+ ignored = action->sa.sa_handler == SIG_IGN;
51616+ blocked = sigismember(&t->blocked, sig);
51617+ if (blocked || ignored) {
51618+ action->sa.sa_handler = SIG_DFL;
51619+ if (blocked) {
51620+ sigdelset(&t->blocked, sig);
51621+ recalc_sigpending_and_wake(t);
51622+ }
51623+ }
51624+ if (action->sa.sa_handler == SIG_DFL)
51625+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
51626+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
51627+
51628+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
51629+
51630+ return ret;
51631+}
51632+#endif
51633+
51634+#ifdef CONFIG_GRKERNSEC_BRUTE
51635+#define GR_USER_BAN_TIME (15 * 60)
51636+
51637+static int __get_dumpable(unsigned long mm_flags)
51638+{
51639+ int ret;
51640+
51641+ ret = mm_flags & MMF_DUMPABLE_MASK;
51642+ return (ret >= 2) ? 2 : ret;
51643+}
51644+#endif
51645+
51646+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
51647+{
51648+#ifdef CONFIG_GRKERNSEC_BRUTE
51649+ uid_t uid = 0;
51650+
51651+ if (!grsec_enable_brute)
51652+ return;
51653+
51654+ rcu_read_lock();
51655+ read_lock(&tasklist_lock);
51656+ read_lock(&grsec_exec_file_lock);
51657+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
51658+ p->real_parent->brute = 1;
51659+ else {
51660+ const struct cred *cred = __task_cred(p), *cred2;
51661+ struct task_struct *tsk, *tsk2;
51662+
51663+ if (!__get_dumpable(mm_flags) && cred->uid) {
51664+ struct user_struct *user;
51665+
51666+ uid = cred->uid;
51667+
51668+ /* this is put upon execution past expiration */
51669+ user = find_user(uid);
51670+ if (user == NULL)
51671+ goto unlock;
51672+ user->banned = 1;
51673+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
51674+ if (user->ban_expires == ~0UL)
51675+ user->ban_expires--;
51676+
51677+ do_each_thread(tsk2, tsk) {
51678+ cred2 = __task_cred(tsk);
51679+ if (tsk != p && cred2->uid == uid)
51680+ gr_fake_force_sig(SIGKILL, tsk);
51681+ } while_each_thread(tsk2, tsk);
51682+ }
51683+ }
51684+unlock:
51685+ read_unlock(&grsec_exec_file_lock);
51686+ read_unlock(&tasklist_lock);
51687+ rcu_read_unlock();
51688+
51689+ if (uid)
51690+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
51691+
51692+#endif
51693+ return;
51694+}
51695+
51696+void gr_handle_brute_check(void)
51697+{
51698+#ifdef CONFIG_GRKERNSEC_BRUTE
51699+ if (current->brute)
51700+ msleep(30 * 1000);
51701+#endif
51702+ return;
51703+}
51704+
51705+void gr_handle_kernel_exploit(void)
51706+{
51707+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
51708+ const struct cred *cred;
51709+ struct task_struct *tsk, *tsk2;
51710+ struct user_struct *user;
51711+ uid_t uid;
51712+
51713+ if (in_irq() || in_serving_softirq() || in_nmi())
51714+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
51715+
51716+ uid = current_uid();
51717+
51718+ if (uid == 0)
51719+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
51720+ else {
51721+ /* kill all the processes of this user, hold a reference
51722+ to their creds struct, and prevent them from creating
51723+ another process until system reset
51724+ */
51725+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
51726+ /* we intentionally leak this ref */
51727+ user = get_uid(current->cred->user);
51728+ if (user) {
51729+ user->banned = 1;
51730+ user->ban_expires = ~0UL;
51731+ }
51732+
51733+ read_lock(&tasklist_lock);
51734+ do_each_thread(tsk2, tsk) {
51735+ cred = __task_cred(tsk);
51736+ if (cred->uid == uid)
51737+ gr_fake_force_sig(SIGKILL, tsk);
51738+ } while_each_thread(tsk2, tsk);
51739+ read_unlock(&tasklist_lock);
51740+ }
51741+#endif
51742+}
51743+
51744+int __gr_process_user_ban(struct user_struct *user)
51745+{
51746+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
51747+ if (unlikely(user->banned)) {
51748+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
51749+ user->banned = 0;
51750+ user->ban_expires = 0;
51751+ free_uid(user);
51752+ } else
51753+ return -EPERM;
51754+ }
51755+#endif
51756+ return 0;
51757+}
51758+
51759+int gr_process_user_ban(void)
51760+{
51761+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
51762+ return __gr_process_user_ban(current->cred->user);
51763+#endif
51764+ return 0;
51765+}
51766diff -urNp linux-3.0.4/grsecurity/grsec_sock.c linux-3.0.4/grsecurity/grsec_sock.c
51767--- linux-3.0.4/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
51768+++ linux-3.0.4/grsecurity/grsec_sock.c 2011-08-23 21:48:14.000000000 -0400
51769@@ -0,0 +1,244 @@
51770+#include <linux/kernel.h>
51771+#include <linux/module.h>
51772+#include <linux/sched.h>
51773+#include <linux/file.h>
51774+#include <linux/net.h>
51775+#include <linux/in.h>
51776+#include <linux/ip.h>
51777+#include <net/sock.h>
51778+#include <net/inet_sock.h>
51779+#include <linux/grsecurity.h>
51780+#include <linux/grinternal.h>
51781+#include <linux/gracl.h>
51782+
51783+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
51784+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
51785+
51786+EXPORT_SYMBOL(gr_search_udp_recvmsg);
51787+EXPORT_SYMBOL(gr_search_udp_sendmsg);
51788+
51789+#ifdef CONFIG_UNIX_MODULE
51790+EXPORT_SYMBOL(gr_acl_handle_unix);
51791+EXPORT_SYMBOL(gr_acl_handle_mknod);
51792+EXPORT_SYMBOL(gr_handle_chroot_unix);
51793+EXPORT_SYMBOL(gr_handle_create);
51794+#endif
51795+
51796+#ifdef CONFIG_GRKERNSEC
51797+#define gr_conn_table_size 32749
51798+struct conn_table_entry {
51799+ struct conn_table_entry *next;
51800+ struct signal_struct *sig;
51801+};
51802+
51803+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
51804+DEFINE_SPINLOCK(gr_conn_table_lock);
51805+
51806+extern const char * gr_socktype_to_name(unsigned char type);
51807+extern const char * gr_proto_to_name(unsigned char proto);
51808+extern const char * gr_sockfamily_to_name(unsigned char family);
51809+
51810+static __inline__ int
51811+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
51812+{
51813+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
51814+}
51815+
51816+static __inline__ int
51817+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
51818+ __u16 sport, __u16 dport)
51819+{
51820+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
51821+ sig->gr_sport == sport && sig->gr_dport == dport))
51822+ return 1;
51823+ else
51824+ return 0;
51825+}
51826+
51827+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
51828+{
51829+ struct conn_table_entry **match;
51830+ unsigned int index;
51831+
51832+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
51833+ sig->gr_sport, sig->gr_dport,
51834+ gr_conn_table_size);
51835+
51836+ newent->sig = sig;
51837+
51838+ match = &gr_conn_table[index];
51839+ newent->next = *match;
51840+ *match = newent;
51841+
51842+ return;
51843+}
51844+
51845+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
51846+{
51847+ struct conn_table_entry *match, *last = NULL;
51848+ unsigned int index;
51849+
51850+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
51851+ sig->gr_sport, sig->gr_dport,
51852+ gr_conn_table_size);
51853+
51854+ match = gr_conn_table[index];
51855+ while (match && !conn_match(match->sig,
51856+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
51857+ sig->gr_dport)) {
51858+ last = match;
51859+ match = match->next;
51860+ }
51861+
51862+ if (match) {
51863+ if (last)
51864+ last->next = match->next;
51865+ else
51866+ gr_conn_table[index] = NULL;
51867+ kfree(match);
51868+ }
51869+
51870+ return;
51871+}
51872+
51873+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
51874+ __u16 sport, __u16 dport)
51875+{
51876+ struct conn_table_entry *match;
51877+ unsigned int index;
51878+
51879+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
51880+
51881+ match = gr_conn_table[index];
51882+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
51883+ match = match->next;
51884+
51885+ if (match)
51886+ return match->sig;
51887+ else
51888+ return NULL;
51889+}
51890+
51891+#endif
51892+
51893+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
51894+{
51895+#ifdef CONFIG_GRKERNSEC
51896+ struct signal_struct *sig = task->signal;
51897+ struct conn_table_entry *newent;
51898+
51899+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
51900+ if (newent == NULL)
51901+ return;
51902+ /* no bh lock needed since we are called with bh disabled */
51903+ spin_lock(&gr_conn_table_lock);
51904+ gr_del_task_from_ip_table_nolock(sig);
51905+ sig->gr_saddr = inet->inet_rcv_saddr;
51906+ sig->gr_daddr = inet->inet_daddr;
51907+ sig->gr_sport = inet->inet_sport;
51908+ sig->gr_dport = inet->inet_dport;
51909+ gr_add_to_task_ip_table_nolock(sig, newent);
51910+ spin_unlock(&gr_conn_table_lock);
51911+#endif
51912+ return;
51913+}
51914+
51915+void gr_del_task_from_ip_table(struct task_struct *task)
51916+{
51917+#ifdef CONFIG_GRKERNSEC
51918+ spin_lock_bh(&gr_conn_table_lock);
51919+ gr_del_task_from_ip_table_nolock(task->signal);
51920+ spin_unlock_bh(&gr_conn_table_lock);
51921+#endif
51922+ return;
51923+}
51924+
51925+void
51926+gr_attach_curr_ip(const struct sock *sk)
51927+{
51928+#ifdef CONFIG_GRKERNSEC
51929+ struct signal_struct *p, *set;
51930+ const struct inet_sock *inet = inet_sk(sk);
51931+
51932+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
51933+ return;
51934+
51935+ set = current->signal;
51936+
51937+ spin_lock_bh(&gr_conn_table_lock);
51938+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
51939+ inet->inet_dport, inet->inet_sport);
51940+ if (unlikely(p != NULL)) {
51941+ set->curr_ip = p->curr_ip;
51942+ set->used_accept = 1;
51943+ gr_del_task_from_ip_table_nolock(p);
51944+ spin_unlock_bh(&gr_conn_table_lock);
51945+ return;
51946+ }
51947+ spin_unlock_bh(&gr_conn_table_lock);
51948+
51949+ set->curr_ip = inet->inet_daddr;
51950+ set->used_accept = 1;
51951+#endif
51952+ return;
51953+}
51954+
51955+int
51956+gr_handle_sock_all(const int family, const int type, const int protocol)
51957+{
51958+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
51959+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
51960+ (family != AF_UNIX)) {
51961+ if (family == AF_INET)
51962+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
51963+ else
51964+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
51965+ return -EACCES;
51966+ }
51967+#endif
51968+ return 0;
51969+}
51970+
51971+int
51972+gr_handle_sock_server(const struct sockaddr *sck)
51973+{
51974+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
51975+ if (grsec_enable_socket_server &&
51976+ in_group_p(grsec_socket_server_gid) &&
51977+ sck && (sck->sa_family != AF_UNIX) &&
51978+ (sck->sa_family != AF_LOCAL)) {
51979+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
51980+ return -EACCES;
51981+ }
51982+#endif
51983+ return 0;
51984+}
51985+
51986+int
51987+gr_handle_sock_server_other(const struct sock *sck)
51988+{
51989+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
51990+ if (grsec_enable_socket_server &&
51991+ in_group_p(grsec_socket_server_gid) &&
51992+ sck && (sck->sk_family != AF_UNIX) &&
51993+ (sck->sk_family != AF_LOCAL)) {
51994+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
51995+ return -EACCES;
51996+ }
51997+#endif
51998+ return 0;
51999+}
52000+
52001+int
52002+gr_handle_sock_client(const struct sockaddr *sck)
52003+{
52004+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
52005+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
52006+ sck && (sck->sa_family != AF_UNIX) &&
52007+ (sck->sa_family != AF_LOCAL)) {
52008+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
52009+ return -EACCES;
52010+ }
52011+#endif
52012+ return 0;
52013+}
52014diff -urNp linux-3.0.4/grsecurity/grsec_sysctl.c linux-3.0.4/grsecurity/grsec_sysctl.c
52015--- linux-3.0.4/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
52016+++ linux-3.0.4/grsecurity/grsec_sysctl.c 2011-08-25 17:26:15.000000000 -0400
52017@@ -0,0 +1,433 @@
52018+#include <linux/kernel.h>
52019+#include <linux/sched.h>
52020+#include <linux/sysctl.h>
52021+#include <linux/grsecurity.h>
52022+#include <linux/grinternal.h>
52023+
52024+int
52025+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
52026+{
52027+#ifdef CONFIG_GRKERNSEC_SYSCTL
52028+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
52029+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
52030+ return -EACCES;
52031+ }
52032+#endif
52033+ return 0;
52034+}
52035+
52036+#ifdef CONFIG_GRKERNSEC_ROFS
52037+static int __maybe_unused one = 1;
52038+#endif
52039+
52040+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
52041+struct ctl_table grsecurity_table[] = {
52042+#ifdef CONFIG_GRKERNSEC_SYSCTL
52043+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
52044+#ifdef CONFIG_GRKERNSEC_IO
52045+ {
52046+ .procname = "disable_priv_io",
52047+ .data = &grsec_disable_privio,
52048+ .maxlen = sizeof(int),
52049+ .mode = 0600,
52050+ .proc_handler = &proc_dointvec,
52051+ },
52052+#endif
52053+#endif
52054+#ifdef CONFIG_GRKERNSEC_LINK
52055+ {
52056+ .procname = "linking_restrictions",
52057+ .data = &grsec_enable_link,
52058+ .maxlen = sizeof(int),
52059+ .mode = 0600,
52060+ .proc_handler = &proc_dointvec,
52061+ },
52062+#endif
52063+#ifdef CONFIG_GRKERNSEC_BRUTE
52064+ {
52065+ .procname = "deter_bruteforce",
52066+ .data = &grsec_enable_brute,
52067+ .maxlen = sizeof(int),
52068+ .mode = 0600,
52069+ .proc_handler = &proc_dointvec,
52070+ },
52071+#endif
52072+#ifdef CONFIG_GRKERNSEC_FIFO
52073+ {
52074+ .procname = "fifo_restrictions",
52075+ .data = &grsec_enable_fifo,
52076+ .maxlen = sizeof(int),
52077+ .mode = 0600,
52078+ .proc_handler = &proc_dointvec,
52079+ },
52080+#endif
52081+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
52082+ {
52083+ .procname = "ip_blackhole",
52084+ .data = &grsec_enable_blackhole,
52085+ .maxlen = sizeof(int),
52086+ .mode = 0600,
52087+ .proc_handler = &proc_dointvec,
52088+ },
52089+ {
52090+ .procname = "lastack_retries",
52091+ .data = &grsec_lastack_retries,
52092+ .maxlen = sizeof(int),
52093+ .mode = 0600,
52094+ .proc_handler = &proc_dointvec,
52095+ },
52096+#endif
52097+#ifdef CONFIG_GRKERNSEC_EXECLOG
52098+ {
52099+ .procname = "exec_logging",
52100+ .data = &grsec_enable_execlog,
52101+ .maxlen = sizeof(int),
52102+ .mode = 0600,
52103+ .proc_handler = &proc_dointvec,
52104+ },
52105+#endif
52106+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
52107+ {
52108+ .procname = "rwxmap_logging",
52109+ .data = &grsec_enable_log_rwxmaps,
52110+ .maxlen = sizeof(int),
52111+ .mode = 0600,
52112+ .proc_handler = &proc_dointvec,
52113+ },
52114+#endif
52115+#ifdef CONFIG_GRKERNSEC_SIGNAL
52116+ {
52117+ .procname = "signal_logging",
52118+ .data = &grsec_enable_signal,
52119+ .maxlen = sizeof(int),
52120+ .mode = 0600,
52121+ .proc_handler = &proc_dointvec,
52122+ },
52123+#endif
52124+#ifdef CONFIG_GRKERNSEC_FORKFAIL
52125+ {
52126+ .procname = "forkfail_logging",
52127+ .data = &grsec_enable_forkfail,
52128+ .maxlen = sizeof(int),
52129+ .mode = 0600,
52130+ .proc_handler = &proc_dointvec,
52131+ },
52132+#endif
52133+#ifdef CONFIG_GRKERNSEC_TIME
52134+ {
52135+ .procname = "timechange_logging",
52136+ .data = &grsec_enable_time,
52137+ .maxlen = sizeof(int),
52138+ .mode = 0600,
52139+ .proc_handler = &proc_dointvec,
52140+ },
52141+#endif
52142+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
52143+ {
52144+ .procname = "chroot_deny_shmat",
52145+ .data = &grsec_enable_chroot_shmat,
52146+ .maxlen = sizeof(int),
52147+ .mode = 0600,
52148+ .proc_handler = &proc_dointvec,
52149+ },
52150+#endif
52151+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
52152+ {
52153+ .procname = "chroot_deny_unix",
52154+ .data = &grsec_enable_chroot_unix,
52155+ .maxlen = sizeof(int),
52156+ .mode = 0600,
52157+ .proc_handler = &proc_dointvec,
52158+ },
52159+#endif
52160+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
52161+ {
52162+ .procname = "chroot_deny_mount",
52163+ .data = &grsec_enable_chroot_mount,
52164+ .maxlen = sizeof(int),
52165+ .mode = 0600,
52166+ .proc_handler = &proc_dointvec,
52167+ },
52168+#endif
52169+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
52170+ {
52171+ .procname = "chroot_deny_fchdir",
52172+ .data = &grsec_enable_chroot_fchdir,
52173+ .maxlen = sizeof(int),
52174+ .mode = 0600,
52175+ .proc_handler = &proc_dointvec,
52176+ },
52177+#endif
52178+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
52179+ {
52180+ .procname = "chroot_deny_chroot",
52181+ .data = &grsec_enable_chroot_double,
52182+ .maxlen = sizeof(int),
52183+ .mode = 0600,
52184+ .proc_handler = &proc_dointvec,
52185+ },
52186+#endif
52187+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
52188+ {
52189+ .procname = "chroot_deny_pivot",
52190+ .data = &grsec_enable_chroot_pivot,
52191+ .maxlen = sizeof(int),
52192+ .mode = 0600,
52193+ .proc_handler = &proc_dointvec,
52194+ },
52195+#endif
52196+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
52197+ {
52198+ .procname = "chroot_enforce_chdir",
52199+ .data = &grsec_enable_chroot_chdir,
52200+ .maxlen = sizeof(int),
52201+ .mode = 0600,
52202+ .proc_handler = &proc_dointvec,
52203+ },
52204+#endif
52205+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
52206+ {
52207+ .procname = "chroot_deny_chmod",
52208+ .data = &grsec_enable_chroot_chmod,
52209+ .maxlen = sizeof(int),
52210+ .mode = 0600,
52211+ .proc_handler = &proc_dointvec,
52212+ },
52213+#endif
52214+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
52215+ {
52216+ .procname = "chroot_deny_mknod",
52217+ .data = &grsec_enable_chroot_mknod,
52218+ .maxlen = sizeof(int),
52219+ .mode = 0600,
52220+ .proc_handler = &proc_dointvec,
52221+ },
52222+#endif
52223+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52224+ {
52225+ .procname = "chroot_restrict_nice",
52226+ .data = &grsec_enable_chroot_nice,
52227+ .maxlen = sizeof(int),
52228+ .mode = 0600,
52229+ .proc_handler = &proc_dointvec,
52230+ },
52231+#endif
52232+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
52233+ {
52234+ .procname = "chroot_execlog",
52235+ .data = &grsec_enable_chroot_execlog,
52236+ .maxlen = sizeof(int),
52237+ .mode = 0600,
52238+ .proc_handler = &proc_dointvec,
52239+ },
52240+#endif
52241+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52242+ {
52243+ .procname = "chroot_caps",
52244+ .data = &grsec_enable_chroot_caps,
52245+ .maxlen = sizeof(int),
52246+ .mode = 0600,
52247+ .proc_handler = &proc_dointvec,
52248+ },
52249+#endif
52250+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
52251+ {
52252+ .procname = "chroot_deny_sysctl",
52253+ .data = &grsec_enable_chroot_sysctl,
52254+ .maxlen = sizeof(int),
52255+ .mode = 0600,
52256+ .proc_handler = &proc_dointvec,
52257+ },
52258+#endif
52259+#ifdef CONFIG_GRKERNSEC_TPE
52260+ {
52261+ .procname = "tpe",
52262+ .data = &grsec_enable_tpe,
52263+ .maxlen = sizeof(int),
52264+ .mode = 0600,
52265+ .proc_handler = &proc_dointvec,
52266+ },
52267+ {
52268+ .procname = "tpe_gid",
52269+ .data = &grsec_tpe_gid,
52270+ .maxlen = sizeof(int),
52271+ .mode = 0600,
52272+ .proc_handler = &proc_dointvec,
52273+ },
52274+#endif
52275+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
52276+ {
52277+ .procname = "tpe_invert",
52278+ .data = &grsec_enable_tpe_invert,
52279+ .maxlen = sizeof(int),
52280+ .mode = 0600,
52281+ .proc_handler = &proc_dointvec,
52282+ },
52283+#endif
52284+#ifdef CONFIG_GRKERNSEC_TPE_ALL
52285+ {
52286+ .procname = "tpe_restrict_all",
52287+ .data = &grsec_enable_tpe_all,
52288+ .maxlen = sizeof(int),
52289+ .mode = 0600,
52290+ .proc_handler = &proc_dointvec,
52291+ },
52292+#endif
52293+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
52294+ {
52295+ .procname = "socket_all",
52296+ .data = &grsec_enable_socket_all,
52297+ .maxlen = sizeof(int),
52298+ .mode = 0600,
52299+ .proc_handler = &proc_dointvec,
52300+ },
52301+ {
52302+ .procname = "socket_all_gid",
52303+ .data = &grsec_socket_all_gid,
52304+ .maxlen = sizeof(int),
52305+ .mode = 0600,
52306+ .proc_handler = &proc_dointvec,
52307+ },
52308+#endif
52309+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
52310+ {
52311+ .procname = "socket_client",
52312+ .data = &grsec_enable_socket_client,
52313+ .maxlen = sizeof(int),
52314+ .mode = 0600,
52315+ .proc_handler = &proc_dointvec,
52316+ },
52317+ {
52318+ .procname = "socket_client_gid",
52319+ .data = &grsec_socket_client_gid,
52320+ .maxlen = sizeof(int),
52321+ .mode = 0600,
52322+ .proc_handler = &proc_dointvec,
52323+ },
52324+#endif
52325+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
52326+ {
52327+ .procname = "socket_server",
52328+ .data = &grsec_enable_socket_server,
52329+ .maxlen = sizeof(int),
52330+ .mode = 0600,
52331+ .proc_handler = &proc_dointvec,
52332+ },
52333+ {
52334+ .procname = "socket_server_gid",
52335+ .data = &grsec_socket_server_gid,
52336+ .maxlen = sizeof(int),
52337+ .mode = 0600,
52338+ .proc_handler = &proc_dointvec,
52339+ },
52340+#endif
52341+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
52342+ {
52343+ .procname = "audit_group",
52344+ .data = &grsec_enable_group,
52345+ .maxlen = sizeof(int),
52346+ .mode = 0600,
52347+ .proc_handler = &proc_dointvec,
52348+ },
52349+ {
52350+ .procname = "audit_gid",
52351+ .data = &grsec_audit_gid,
52352+ .maxlen = sizeof(int),
52353+ .mode = 0600,
52354+ .proc_handler = &proc_dointvec,
52355+ },
52356+#endif
52357+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
52358+ {
52359+ .procname = "audit_chdir",
52360+ .data = &grsec_enable_chdir,
52361+ .maxlen = sizeof(int),
52362+ .mode = 0600,
52363+ .proc_handler = &proc_dointvec,
52364+ },
52365+#endif
52366+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
52367+ {
52368+ .procname = "audit_mount",
52369+ .data = &grsec_enable_mount,
52370+ .maxlen = sizeof(int),
52371+ .mode = 0600,
52372+ .proc_handler = &proc_dointvec,
52373+ },
52374+#endif
52375+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
52376+ {
52377+ .procname = "audit_textrel",
52378+ .data = &grsec_enable_audit_textrel,
52379+ .maxlen = sizeof(int),
52380+ .mode = 0600,
52381+ .proc_handler = &proc_dointvec,
52382+ },
52383+#endif
52384+#ifdef CONFIG_GRKERNSEC_DMESG
52385+ {
52386+ .procname = "dmesg",
52387+ .data = &grsec_enable_dmesg,
52388+ .maxlen = sizeof(int),
52389+ .mode = 0600,
52390+ .proc_handler = &proc_dointvec,
52391+ },
52392+#endif
52393+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52394+ {
52395+ .procname = "chroot_findtask",
52396+ .data = &grsec_enable_chroot_findtask,
52397+ .maxlen = sizeof(int),
52398+ .mode = 0600,
52399+ .proc_handler = &proc_dointvec,
52400+ },
52401+#endif
52402+#ifdef CONFIG_GRKERNSEC_RESLOG
52403+ {
52404+ .procname = "resource_logging",
52405+ .data = &grsec_resource_logging,
52406+ .maxlen = sizeof(int),
52407+ .mode = 0600,
52408+ .proc_handler = &proc_dointvec,
52409+ },
52410+#endif
52411+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
52412+ {
52413+ .procname = "audit_ptrace",
52414+ .data = &grsec_enable_audit_ptrace,
52415+ .maxlen = sizeof(int),
52416+ .mode = 0600,
52417+ .proc_handler = &proc_dointvec,
52418+ },
52419+#endif
52420+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52421+ {
52422+ .procname = "harden_ptrace",
52423+ .data = &grsec_enable_harden_ptrace,
52424+ .maxlen = sizeof(int),
52425+ .mode = 0600,
52426+ .proc_handler = &proc_dointvec,
52427+ },
52428+#endif
52429+ {
52430+ .procname = "grsec_lock",
52431+ .data = &grsec_lock,
52432+ .maxlen = sizeof(int),
52433+ .mode = 0600,
52434+ .proc_handler = &proc_dointvec,
52435+ },
52436+#endif
52437+#ifdef CONFIG_GRKERNSEC_ROFS
52438+ {
52439+ .procname = "romount_protect",
52440+ .data = &grsec_enable_rofs,
52441+ .maxlen = sizeof(int),
52442+ .mode = 0600,
52443+ .proc_handler = &proc_dointvec_minmax,
52444+ .extra1 = &one,
52445+ .extra2 = &one,
52446+ },
52447+#endif
52448+ { }
52449+};
52450+#endif
52451diff -urNp linux-3.0.4/grsecurity/grsec_time.c linux-3.0.4/grsecurity/grsec_time.c
52452--- linux-3.0.4/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
52453+++ linux-3.0.4/grsecurity/grsec_time.c 2011-08-23 21:48:14.000000000 -0400
52454@@ -0,0 +1,16 @@
52455+#include <linux/kernel.h>
52456+#include <linux/sched.h>
52457+#include <linux/grinternal.h>
52458+#include <linux/module.h>
52459+
52460+void
52461+gr_log_timechange(void)
52462+{
52463+#ifdef CONFIG_GRKERNSEC_TIME
52464+ if (grsec_enable_time)
52465+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
52466+#endif
52467+ return;
52468+}
52469+
52470+EXPORT_SYMBOL(gr_log_timechange);
52471diff -urNp linux-3.0.4/grsecurity/grsec_tpe.c linux-3.0.4/grsecurity/grsec_tpe.c
52472--- linux-3.0.4/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
52473+++ linux-3.0.4/grsecurity/grsec_tpe.c 2011-08-23 21:48:14.000000000 -0400
52474@@ -0,0 +1,39 @@
52475+#include <linux/kernel.h>
52476+#include <linux/sched.h>
52477+#include <linux/file.h>
52478+#include <linux/fs.h>
52479+#include <linux/grinternal.h>
52480+
52481+extern int gr_acl_tpe_check(void);
52482+
52483+int
52484+gr_tpe_allow(const struct file *file)
52485+{
52486+#ifdef CONFIG_GRKERNSEC
52487+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
52488+ const struct cred *cred = current_cred();
52489+
52490+ if (cred->uid && ((grsec_enable_tpe &&
52491+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
52492+ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
52493+ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
52494+#else
52495+ in_group_p(grsec_tpe_gid)
52496+#endif
52497+ ) || gr_acl_tpe_check()) &&
52498+ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
52499+ (inode->i_mode & S_IWOTH))))) {
52500+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
52501+ return 0;
52502+ }
52503+#ifdef CONFIG_GRKERNSEC_TPE_ALL
52504+ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
52505+ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
52506+ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
52507+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
52508+ return 0;
52509+ }
52510+#endif
52511+#endif
52512+ return 1;
52513+}
52514diff -urNp linux-3.0.4/grsecurity/grsum.c linux-3.0.4/grsecurity/grsum.c
52515--- linux-3.0.4/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
52516+++ linux-3.0.4/grsecurity/grsum.c 2011-08-23 21:48:14.000000000 -0400
52517@@ -0,0 +1,61 @@
52518+#include <linux/err.h>
52519+#include <linux/kernel.h>
52520+#include <linux/sched.h>
52521+#include <linux/mm.h>
52522+#include <linux/scatterlist.h>
52523+#include <linux/crypto.h>
52524+#include <linux/gracl.h>
52525+
52526+
52527+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
52528+#error "crypto and sha256 must be built into the kernel"
52529+#endif
52530+
52531+int
52532+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
52533+{
52534+ char *p;
52535+ struct crypto_hash *tfm;
52536+ struct hash_desc desc;
52537+ struct scatterlist sg;
52538+ unsigned char temp_sum[GR_SHA_LEN];
52539+ volatile int retval = 0;
52540+ volatile int dummy = 0;
52541+ unsigned int i;
52542+
52543+ sg_init_table(&sg, 1);
52544+
52545+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
52546+ if (IS_ERR(tfm)) {
52547+ /* should never happen, since sha256 should be built in */
52548+ return 1;
52549+ }
52550+
52551+ desc.tfm = tfm;
52552+ desc.flags = 0;
52553+
52554+ crypto_hash_init(&desc);
52555+
52556+ p = salt;
52557+ sg_set_buf(&sg, p, GR_SALT_LEN);
52558+ crypto_hash_update(&desc, &sg, sg.length);
52559+
52560+ p = entry->pw;
52561+ sg_set_buf(&sg, p, strlen(p));
52562+
52563+ crypto_hash_update(&desc, &sg, sg.length);
52564+
52565+ crypto_hash_final(&desc, temp_sum);
52566+
52567+ memset(entry->pw, 0, GR_PW_LEN);
52568+
52569+ for (i = 0; i < GR_SHA_LEN; i++)
52570+ if (sum[i] != temp_sum[i])
52571+ retval = 1;
52572+ else
52573+ dummy = 1; // waste a cycle
52574+
52575+ crypto_free_hash(tfm);
52576+
52577+ return retval;
52578+}
52579diff -urNp linux-3.0.4/grsecurity/Kconfig linux-3.0.4/grsecurity/Kconfig
52580--- linux-3.0.4/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
52581+++ linux-3.0.4/grsecurity/Kconfig 2011-08-25 17:25:34.000000000 -0400
52582@@ -0,0 +1,1038 @@
52583+#
52584+# grecurity configuration
52585+#
52586+
52587+menu "Grsecurity"
52588+
52589+config GRKERNSEC
52590+ bool "Grsecurity"
52591+ select CRYPTO
52592+ select CRYPTO_SHA256
52593+ help
52594+ If you say Y here, you will be able to configure many features
52595+ that will enhance the security of your system. It is highly
52596+ recommended that you say Y here and read through the help
52597+ for each option so that you fully understand the features and
52598+ can evaluate their usefulness for your machine.
52599+
52600+choice
52601+ prompt "Security Level"
52602+ depends on GRKERNSEC
52603+ default GRKERNSEC_CUSTOM
52604+
52605+config GRKERNSEC_LOW
52606+ bool "Low"
52607+ select GRKERNSEC_LINK
52608+ select GRKERNSEC_FIFO
52609+ select GRKERNSEC_RANDNET
52610+ select GRKERNSEC_DMESG
52611+ select GRKERNSEC_CHROOT
52612+ select GRKERNSEC_CHROOT_CHDIR
52613+
52614+ help
52615+ If you choose this option, several of the grsecurity options will
52616+ be enabled that will give you greater protection against a number
52617+ of attacks, while assuring that none of your software will have any
52618+ conflicts with the additional security measures. If you run a lot
52619+ of unusual software, or you are having problems with the higher
52620+ security levels, you should say Y here. With this option, the
52621+ following features are enabled:
52622+
52623+ - Linking restrictions
52624+ - FIFO restrictions
52625+ - Restricted dmesg
52626+ - Enforced chdir("/") on chroot
52627+ - Runtime module disabling
52628+
52629+config GRKERNSEC_MEDIUM
52630+ bool "Medium"
52631+ select PAX
52632+ select PAX_EI_PAX
52633+ select PAX_PT_PAX_FLAGS
52634+ select PAX_HAVE_ACL_FLAGS
52635+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
52636+ select GRKERNSEC_CHROOT
52637+ select GRKERNSEC_CHROOT_SYSCTL
52638+ select GRKERNSEC_LINK
52639+ select GRKERNSEC_FIFO
52640+ select GRKERNSEC_DMESG
52641+ select GRKERNSEC_RANDNET
52642+ select GRKERNSEC_FORKFAIL
52643+ select GRKERNSEC_TIME
52644+ select GRKERNSEC_SIGNAL
52645+ select GRKERNSEC_CHROOT
52646+ select GRKERNSEC_CHROOT_UNIX
52647+ select GRKERNSEC_CHROOT_MOUNT
52648+ select GRKERNSEC_CHROOT_PIVOT
52649+ select GRKERNSEC_CHROOT_DOUBLE
52650+ select GRKERNSEC_CHROOT_CHDIR
52651+ select GRKERNSEC_CHROOT_MKNOD
52652+ select GRKERNSEC_PROC
52653+ select GRKERNSEC_PROC_USERGROUP
52654+ select PAX_RANDUSTACK
52655+ select PAX_ASLR
52656+ select PAX_RANDMMAP
52657+ select PAX_REFCOUNT if (X86 || SPARC64)
52658+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
52659+
52660+ help
52661+ If you say Y here, several features in addition to those included
52662+ in the low additional security level will be enabled. These
52663+ features provide even more security to your system, though in rare
52664+ cases they may be incompatible with very old or poorly written
52665+ software. If you enable this option, make sure that your auth
52666+ service (identd) is running as gid 1001. With this option,
52667+ the following features (in addition to those provided in the
52668+ low additional security level) will be enabled:
52669+
52670+ - Failed fork logging
52671+ - Time change logging
52672+ - Signal logging
52673+ - Deny mounts in chroot
52674+ - Deny double chrooting
52675+ - Deny sysctl writes in chroot
52676+ - Deny mknod in chroot
52677+ - Deny access to abstract AF_UNIX sockets out of chroot
52678+ - Deny pivot_root in chroot
52679+ - Denied writes of /dev/kmem, /dev/mem, and /dev/port
52680+ - /proc restrictions with special GID set to 10 (usually wheel)
52681+ - Address Space Layout Randomization (ASLR)
52682+ - Prevent exploitation of most refcount overflows
52683+ - Bounds checking of copying between the kernel and userland
52684+
52685+config GRKERNSEC_HIGH
52686+ bool "High"
52687+ select GRKERNSEC_LINK
52688+ select GRKERNSEC_FIFO
52689+ select GRKERNSEC_DMESG
52690+ select GRKERNSEC_FORKFAIL
52691+ select GRKERNSEC_TIME
52692+ select GRKERNSEC_SIGNAL
52693+ select GRKERNSEC_CHROOT
52694+ select GRKERNSEC_CHROOT_SHMAT
52695+ select GRKERNSEC_CHROOT_UNIX
52696+ select GRKERNSEC_CHROOT_MOUNT
52697+ select GRKERNSEC_CHROOT_FCHDIR
52698+ select GRKERNSEC_CHROOT_PIVOT
52699+ select GRKERNSEC_CHROOT_DOUBLE
52700+ select GRKERNSEC_CHROOT_CHDIR
52701+ select GRKERNSEC_CHROOT_MKNOD
52702+ select GRKERNSEC_CHROOT_CAPS
52703+ select GRKERNSEC_CHROOT_SYSCTL
52704+ select GRKERNSEC_CHROOT_FINDTASK
52705+ select GRKERNSEC_SYSFS_RESTRICT
52706+ select GRKERNSEC_PROC
52707+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
52708+ select GRKERNSEC_HIDESYM
52709+ select GRKERNSEC_BRUTE
52710+ select GRKERNSEC_PROC_USERGROUP
52711+ select GRKERNSEC_KMEM
52712+ select GRKERNSEC_RESLOG
52713+ select GRKERNSEC_RANDNET
52714+ select GRKERNSEC_PROC_ADD
52715+ select GRKERNSEC_CHROOT_CHMOD
52716+ select GRKERNSEC_CHROOT_NICE
52717+ select GRKERNSEC_AUDIT_MOUNT
52718+ select GRKERNSEC_MODHARDEN if (MODULES)
52719+ select GRKERNSEC_HARDEN_PTRACE
52720+ select GRKERNSEC_VM86 if (X86_32)
52721+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
52722+ select PAX
52723+ select PAX_RANDUSTACK
52724+ select PAX_ASLR
52725+ select PAX_RANDMMAP
52726+ select PAX_NOEXEC
52727+ select PAX_MPROTECT
52728+ select PAX_EI_PAX
52729+ select PAX_PT_PAX_FLAGS
52730+ select PAX_HAVE_ACL_FLAGS
52731+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
52732+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
52733+ select PAX_RANDKSTACK if (X86_TSC && X86)
52734+ select PAX_SEGMEXEC if (X86_32)
52735+ select PAX_PAGEEXEC
52736+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
52737+ select PAX_EMUTRAMP if (PARISC)
52738+ select PAX_EMUSIGRT if (PARISC)
52739+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
52740+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
52741+ select PAX_REFCOUNT if (X86 || SPARC64)
52742+ select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
52743+ help
52744+ If you say Y here, many of the features of grsecurity will be
52745+ enabled, which will protect you against many kinds of attacks
52746+ against your system. The heightened security comes at a cost
52747+ of an increased chance of incompatibilities with rare software
52748+ on your machine. Since this security level enables PaX, you should
52749+ view <http://pax.grsecurity.net> and read about the PaX
52750+ project. While you are there, download chpax and run it on
52751+ binaries that cause problems with PaX. Also remember that
52752+ since the /proc restrictions are enabled, you must run your
52753+ identd as gid 1001. This security level enables the following
52754+ features in addition to those listed in the low and medium
52755+ security levels:
52756+
52757+ - Additional /proc restrictions
52758+ - Chmod restrictions in chroot
52759+ - No signals, ptrace, or viewing of processes outside of chroot
52760+ - Capability restrictions in chroot
52761+ - Deny fchdir out of chroot
52762+ - Priority restrictions in chroot
52763+ - Segmentation-based implementation of PaX
52764+ - Mprotect restrictions
52765+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
52766+ - Kernel stack randomization
52767+ - Mount/unmount/remount logging
52768+ - Kernel symbol hiding
52769+ - Prevention of memory exhaustion-based exploits
52770+ - Hardening of module auto-loading
52771+ - Ptrace restrictions
52772+ - Restricted vm86 mode
52773+ - Restricted sysfs/debugfs
52774+ - Active kernel exploit response
52775+
52776+config GRKERNSEC_CUSTOM
52777+ bool "Custom"
52778+ help
52779+ If you say Y here, you will be able to configure every grsecurity
52780+ option, which allows you to enable many more features that aren't
52781+ covered in the basic security levels. These additional features
52782+ include TPE, socket restrictions, and the sysctl system for
52783+ grsecurity. It is advised that you read through the help for
52784+ each option to determine its usefulness in your situation.
52785+
52786+endchoice
52787+
52788+menu "Address Space Protection"
52789+depends on GRKERNSEC
52790+
52791+config GRKERNSEC_KMEM
52792+ bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
52793+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
52794+ help
52795+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
52796+ be written to via mmap or otherwise to modify the running kernel.
52797+ /dev/port will also not be allowed to be opened. If you have module
52798+ support disabled, enabling this will close up four ways that are
52799+ currently used to insert malicious code into the running kernel.
52800+ Even with all these features enabled, we still highly recommend that
52801+ you use the RBAC system, as it is still possible for an attacker to
52802+ modify the running kernel through privileged I/O granted by ioperm/iopl.
52803+ If you are not using XFree86, you may be able to stop this additional
52804+ case by enabling the 'Disable privileged I/O' option. Though nothing
52805+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
52806+ but only to video memory, which is the only writing we allow in this
52807+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
52808+ not be allowed to mprotect it with PROT_WRITE later.
52809+ It is highly recommended that you say Y here if you meet all the
52810+ conditions above.
52811+
52812+config GRKERNSEC_VM86
52813+ bool "Restrict VM86 mode"
52814+ depends on X86_32
52815+
52816+ help
52817+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
52818+ make use of a special execution mode on 32bit x86 processors called
52819+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
52820+ video cards and will still work with this option enabled. The purpose
52821+ of the option is to prevent exploitation of emulation errors in
52822+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
52823+ Nearly all users should be able to enable this option.
52824+
52825+config GRKERNSEC_IO
52826+ bool "Disable privileged I/O"
52827+ depends on X86
52828+ select RTC_CLASS
52829+ select RTC_INTF_DEV
52830+ select RTC_DRV_CMOS
52831+
52832+ help
52833+ If you say Y here, all ioperm and iopl calls will return an error.
52834+ Ioperm and iopl can be used to modify the running kernel.
52835+ Unfortunately, some programs need this access to operate properly,
52836+ the most notable of which are XFree86 and hwclock. hwclock can be
52837+ remedied by having RTC support in the kernel, so real-time
52838+ clock support is enabled if this option is enabled, to ensure
52839+ that hwclock operates correctly. XFree86 still will not
52840+ operate correctly with this option enabled, so DO NOT CHOOSE Y
52841+ IF YOU USE XFree86. If you use XFree86 and you still want to
52842+ protect your kernel against modification, use the RBAC system.
52843+
52844+config GRKERNSEC_PROC_MEMMAP
52845+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
52846+ default y if (PAX_NOEXEC || PAX_ASLR)
52847+ depends on PAX_NOEXEC || PAX_ASLR
52848+ help
52849+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
52850+ give no information about the addresses of its mappings if
52851+ PaX features that rely on random addresses are enabled on the task.
52852+ If you use PaX it is greatly recommended that you say Y here as it
52853+ closes up a hole that makes the full ASLR useless for suid
52854+ binaries.
52855+
52856+config GRKERNSEC_BRUTE
52857+ bool "Deter exploit bruteforcing"
52858+ help
52859+ If you say Y here, attempts to bruteforce exploits against forking
52860+ daemons such as apache or sshd, as well as against suid/sgid binaries
52861+ will be deterred. When a child of a forking daemon is killed by PaX
52862+ or crashes due to an illegal instruction or other suspicious signal,
52863+ the parent process will be delayed 30 seconds upon every subsequent
52864+ fork until the administrator is able to assess the situation and
52865+ restart the daemon.
52866+ In the suid/sgid case, the attempt is logged, the user has all their
52867+ processes terminated, and they are prevented from executing any further
52868+ processes for 15 minutes.
52869+ It is recommended that you also enable signal logging in the auditing
52870+ section so that logs are generated when a process triggers a suspicious
52871+ signal.
52872+ If the sysctl option is enabled, a sysctl option with name
52873+ "deter_bruteforce" is created.
52874+
52875+
52876+config GRKERNSEC_MODHARDEN
52877+ bool "Harden module auto-loading"
52878+ depends on MODULES
52879+ help
52880+ If you say Y here, module auto-loading in response to use of some
52881+ feature implemented by an unloaded module will be restricted to
52882+ root users. Enabling this option helps defend against attacks
52883+ by unprivileged users who abuse the auto-loading behavior to
52884+ cause a vulnerable module to load that is then exploited.
52885+
52886+ If this option prevents a legitimate use of auto-loading for a
52887+ non-root user, the administrator can execute modprobe manually
52888+ with the exact name of the module mentioned in the alert log.
52889+ Alternatively, the administrator can add the module to the list
52890+ of modules loaded at boot by modifying init scripts.
52891+
52892+ Modification of init scripts will most likely be needed on
52893+ Ubuntu servers with encrypted home directory support enabled,
52894+ as the first non-root user logging in will cause the ecb(aes),
52895+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
52896+
52897+config GRKERNSEC_HIDESYM
52898+ bool "Hide kernel symbols"
52899+ help
52900+ If you say Y here, getting information on loaded modules, and
52901+ displaying all kernel symbols through a syscall will be restricted
52902+ to users with CAP_SYS_MODULE. For software compatibility reasons,
52903+ /proc/kallsyms will be restricted to the root user. The RBAC
52904+ system can hide that entry even from root.
52905+
52906+ This option also prevents leaking of kernel addresses through
52907+ several /proc entries.
52908+
52909+ Note that this option is only effective provided the following
52910+ conditions are met:
52911+ 1) The kernel using grsecurity is not precompiled by some distribution
52912+ 2) You have also enabled GRKERNSEC_DMESG
52913+ 3) You are using the RBAC system and hiding other files such as your
52914+ kernel image and System.map. Alternatively, enabling this option
52915+ causes the permissions on /boot, /lib/modules, and the kernel
52916+ source directory to change at compile time to prevent
52917+ reading by non-root users.
52918+ If the above conditions are met, this option will aid in providing a
52919+ useful protection against local kernel exploitation of overflows
52920+ and arbitrary read/write vulnerabilities.
52921+
52922+config GRKERNSEC_KERN_LOCKOUT
52923+ bool "Active kernel exploit response"
52924+ depends on X86 || ARM || PPC || SPARC
52925+ help
52926+ If you say Y here, when a PaX alert is triggered due to suspicious
52927+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
52928+ or an OOPs occurs due to bad memory accesses, instead of just
52929+ terminating the offending process (and potentially allowing
52930+ a subsequent exploit from the same user), we will take one of two
52931+ actions:
52932+ If the user was root, we will panic the system
52933+ If the user was non-root, we will log the attempt, terminate
52934+ all processes owned by the user, then prevent them from creating
52935+ any new processes until the system is restarted
52936+ This deters repeated kernel exploitation/bruteforcing attempts
52937+ and is useful for later forensics.
52938+
52939+endmenu
52940+menu "Role Based Access Control Options"
52941+depends on GRKERNSEC
52942+
52943+config GRKERNSEC_RBAC_DEBUG
52944+ bool
52945+
52946+config GRKERNSEC_NO_RBAC
52947+ bool "Disable RBAC system"
52948+ help
52949+ If you say Y here, the /dev/grsec device will be removed from the kernel,
52950+ preventing the RBAC system from being enabled. You should only say Y
52951+ here if you have no intention of using the RBAC system, so as to prevent
52952+ an attacker with root access from misusing the RBAC system to hide files
52953+ and processes when loadable module support and /dev/[k]mem have been
52954+ locked down.
52955+
52956+config GRKERNSEC_ACL_HIDEKERN
52957+ bool "Hide kernel processes"
52958+ help
52959+ If you say Y here, all kernel threads will be hidden to all
52960+ processes but those whose subject has the "view hidden processes"
52961+ flag.
52962+
52963+config GRKERNSEC_ACL_MAXTRIES
52964+ int "Maximum tries before password lockout"
52965+ default 3
52966+ help
52967+ This option enforces the maximum number of times a user can attempt
52968+ to authorize themselves with the grsecurity RBAC system before being
52969+ denied the ability to attempt authorization again for a specified time.
52970+ The lower the number, the harder it will be to brute-force a password.
52971+
52972+config GRKERNSEC_ACL_TIMEOUT
52973+ int "Time to wait after max password tries, in seconds"
52974+ default 30
52975+ help
52976+ This option specifies the time the user must wait after attempting to
52977+ authorize to the RBAC system with the maximum number of invalid
52978+ passwords. The higher the number, the harder it will be to brute-force
52979+ a password.
52980+
52981+endmenu
52982+menu "Filesystem Protections"
52983+depends on GRKERNSEC
52984+
52985+config GRKERNSEC_PROC
52986+ bool "Proc restrictions"
52987+ help
52988+ If you say Y here, the permissions of the /proc filesystem
52989+ will be altered to enhance system security and privacy. You MUST
52990+ choose either a user only restriction or a user and group restriction.
52991+ Depending upon the option you choose, you can either restrict users to
52992+ see only the processes they themselves run, or choose a group that can
52993+ view all processes and files normally restricted to root if you choose
52994+ the "restrict to user only" option. NOTE: If you're running identd as
52995+ a non-root user, you will have to run it as the group you specify here.
52996+
52997+config GRKERNSEC_PROC_USER
52998+ bool "Restrict /proc to user only"
52999+ depends on GRKERNSEC_PROC
53000+ help
53001+ If you say Y here, non-root users will only be able to view their own
53002+ processes, and restricts them from viewing network-related information,
53003+ and viewing kernel symbol and module information.
53004+
53005+config GRKERNSEC_PROC_USERGROUP
53006+ bool "Allow special group"
53007+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
53008+ help
53009+ If you say Y here, you will be able to select a group that will be
53010+ able to view all processes and network-related information. If you've
53011+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
53012+ remain hidden. This option is useful if you want to run identd as
53013+ a non-root user.
53014+
53015+config GRKERNSEC_PROC_GID
53016+ int "GID for special group"
53017+ depends on GRKERNSEC_PROC_USERGROUP
53018+ default 1001
53019+
53020+config GRKERNSEC_PROC_ADD
53021+ bool "Additional restrictions"
53022+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
53023+ help
53024+ If you say Y here, additional restrictions will be placed on
53025+ /proc that keep normal users from viewing device information and
53026+ slabinfo information that could be useful for exploits.
53027+
53028+config GRKERNSEC_LINK
53029+ bool "Linking restrictions"
53030+ help
53031+ If you say Y here, /tmp race exploits will be prevented, since users
53032+ will no longer be able to follow symlinks owned by other users in
53033+ world-writable +t directories (e.g. /tmp), unless the owner of the
53034+ symlink is the owner of the directory. Users will also not be
53035+ able to hardlink to files they do not own. If the sysctl option is
53036+ enabled, a sysctl option with name "linking_restrictions" is created.
53037+
53038+config GRKERNSEC_FIFO
53039+ bool "FIFO restrictions"
53040+ help
53041+ If you say Y here, users will not be able to write to FIFOs they don't
53042+ own in world-writable +t directories (e.g. /tmp), unless the owner of
53043+ the FIFO is the same owner of the directory it's held in. If the sysctl
53044+ option is enabled, a sysctl option with name "fifo_restrictions" is
53045+ created.
53046+
53047+config GRKERNSEC_SYSFS_RESTRICT
53048+ bool "Sysfs/debugfs restriction"
53049+ depends on SYSFS
53050+ help
53051+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
53052+ any filesystem normally mounted under it (e.g. debugfs) will only
53053+ be accessible by root. These filesystems generally provide access
53054+ to hardware and debug information that isn't appropriate for unprivileged
53055+ users of the system. Sysfs and debugfs have also become a large source
53056+ of new vulnerabilities, ranging from infoleaks to local compromise.
53057+ There has been very little oversight with an eye toward security involved
53058+ in adding new exporters of information to these filesystems, so their
53059+ use is discouraged.
53060+ This option is equivalent to a chmod 0700 of the mount paths.
53061+
53062+config GRKERNSEC_ROFS
53063+ bool "Runtime read-only mount protection"
53064+ help
53065+ If you say Y here, a sysctl option with name "romount_protect" will
53066+ be created. By setting this option to 1 at runtime, filesystems
53067+ will be protected in the following ways:
53068+ * No new writable mounts will be allowed
53069+ * Existing read-only mounts won't be able to be remounted read/write
53070+ * Write operations will be denied on all block devices
53071+ This option acts independently of grsec_lock: once it is set to 1,
53072+ it cannot be turned off. Therefore, please be mindful of the resulting
53073+ behavior if this option is enabled in an init script on a read-only
53074+ filesystem. This feature is mainly intended for secure embedded systems.
53075+
53076+config GRKERNSEC_CHROOT
53077+ bool "Chroot jail restrictions"
53078+ help
53079+ If you say Y here, you will be able to choose several options that will
53080+ make breaking out of a chrooted jail much more difficult. If you
53081+ encounter no software incompatibilities with the following options, it
53082+ is recommended that you enable each one.
53083+
53084+config GRKERNSEC_CHROOT_MOUNT
53085+ bool "Deny mounts"
53086+ depends on GRKERNSEC_CHROOT
53087+ help
53088+ If you say Y here, processes inside a chroot will not be able to
53089+ mount or remount filesystems. If the sysctl option is enabled, a
53090+ sysctl option with name "chroot_deny_mount" is created.
53091+
53092+config GRKERNSEC_CHROOT_DOUBLE
53093+ bool "Deny double-chroots"
53094+ depends on GRKERNSEC_CHROOT
53095+ help
53096+ If you say Y here, processes inside a chroot will not be able to chroot
53097+ again outside the chroot. This is a widely used method of breaking
53098+ out of a chroot jail and should not be allowed. If the sysctl
53099+ option is enabled, a sysctl option with name
53100+ "chroot_deny_chroot" is created.
53101+
53102+config GRKERNSEC_CHROOT_PIVOT
53103+ bool "Deny pivot_root in chroot"
53104+ depends on GRKERNSEC_CHROOT
53105+ help
53106+ If you say Y here, processes inside a chroot will not be able to use
53107+ a function called pivot_root() that was introduced in Linux 2.3.41. It
53108+ works similar to chroot in that it changes the root filesystem. This
53109+ function could be misused in a chrooted process to attempt to break out
53110+ of the chroot, and therefore should not be allowed. If the sysctl
53111+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
53112+ created.
53113+
53114+config GRKERNSEC_CHROOT_CHDIR
53115+ bool "Enforce chdir(\"/\") on all chroots"
53116+ depends on GRKERNSEC_CHROOT
53117+ help
53118+ If you say Y here, the current working directory of all newly-chrooted
53119+ applications will be set to the root directory of the chroot.
53120+ The man page on chroot(2) states:
53121+ Note that this call does not change the current working
53122+ directory, so that `.' can be outside the tree rooted at
53123+ `/'. In particular, the super-user can escape from a
53124+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
53125+
53126+ It is recommended that you say Y here, since it's not known to break
53127+ any software. If the sysctl option is enabled, a sysctl option with
53128+ name "chroot_enforce_chdir" is created.
53129+
53130+config GRKERNSEC_CHROOT_CHMOD
53131+ bool "Deny (f)chmod +s"
53132+ depends on GRKERNSEC_CHROOT
53133+ help
53134+ If you say Y here, processes inside a chroot will not be able to chmod
53135+ or fchmod files to make them have suid or sgid bits. This protects
53136+ against another published method of breaking a chroot. If the sysctl
53137+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
53138+ created.
53139+
53140+config GRKERNSEC_CHROOT_FCHDIR
53141+ bool "Deny fchdir out of chroot"
53142+ depends on GRKERNSEC_CHROOT
53143+ help
53144+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
53145+ to a file descriptor of the chrooting process that points to a directory
53146+ outside the filesystem will be stopped. If the sysctl option
53147+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
53148+
53149+config GRKERNSEC_CHROOT_MKNOD
53150+ bool "Deny mknod"
53151+ depends on GRKERNSEC_CHROOT
53152+ help
53153+ If you say Y here, processes inside a chroot will not be allowed to
53154+ mknod. The problem with using mknod inside a chroot is that it
53155+ would allow an attacker to create a device entry that is the same
53156+ as one on the physical root of your system, which could range from
53157+ anything from the console device to a device for your harddrive (which
53158+ they could then use to wipe the drive or steal data). It is recommended
53159+ that you say Y here, unless you run into software incompatibilities.
53160+ If the sysctl option is enabled, a sysctl option with name
53161+ "chroot_deny_mknod" is created.
53162+
53163+config GRKERNSEC_CHROOT_SHMAT
53164+ bool "Deny shmat() out of chroot"
53165+ depends on GRKERNSEC_CHROOT
53166+ help
53167+ If you say Y here, processes inside a chroot will not be able to attach
53168+ to shared memory segments that were created outside of the chroot jail.
53169+ It is recommended that you say Y here. If the sysctl option is enabled,
53170+ a sysctl option with name "chroot_deny_shmat" is created.
53171+
53172+config GRKERNSEC_CHROOT_UNIX
53173+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
53174+ depends on GRKERNSEC_CHROOT
53175+ help
53176+ If you say Y here, processes inside a chroot will not be able to
53177+ connect to abstract (meaning not belonging to a filesystem) Unix
53178+ domain sockets that were bound outside of a chroot. It is recommended
53179+ that you say Y here. If the sysctl option is enabled, a sysctl option
53180+ with name "chroot_deny_unix" is created.
53181+
53182+config GRKERNSEC_CHROOT_FINDTASK
53183+ bool "Protect outside processes"
53184+ depends on GRKERNSEC_CHROOT
53185+ help
53186+ If you say Y here, processes inside a chroot will not be able to
53187+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
53188+ getsid, or view any process outside of the chroot. If the sysctl
53189+ option is enabled, a sysctl option with name "chroot_findtask" is
53190+ created.
53191+
53192+config GRKERNSEC_CHROOT_NICE
53193+ bool "Restrict priority changes"
53194+ depends on GRKERNSEC_CHROOT
53195+ help
53196+ If you say Y here, processes inside a chroot will not be able to raise
53197+ the priority of processes in the chroot, or alter the priority of
53198+ processes outside the chroot. This provides more security than simply
53199+ removing CAP_SYS_NICE from the process' capability set. If the
53200+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
53201+ is created.
53202+
53203+config GRKERNSEC_CHROOT_SYSCTL
53204+ bool "Deny sysctl writes"
53205+ depends on GRKERNSEC_CHROOT
53206+ help
53207+ If you say Y here, an attacker in a chroot will not be able to
53208+ write to sysctl entries, either by sysctl(2) or through a /proc
53209+ interface. It is strongly recommended that you say Y here. If the
53210+ sysctl option is enabled, a sysctl option with name
53211+ "chroot_deny_sysctl" is created.
53212+
53213+config GRKERNSEC_CHROOT_CAPS
53214+ bool "Capability restrictions"
53215+ depends on GRKERNSEC_CHROOT
53216+ help
53217+ If you say Y here, the capabilities on all root processes within a
53218+ chroot jail will be lowered to stop module insertion, raw i/o,
53219+ system and net admin tasks, rebooting the system, modifying immutable
53220+ files, modifying IPC owned by another, and changing the system time.
53221+ This is left an option because it can break some apps. Disable this
53222+ if your chrooted apps are having problems performing those kinds of
53223+ tasks. If the sysctl option is enabled, a sysctl option with
53224+ name "chroot_caps" is created.
53225+
53226+endmenu
53227+menu "Kernel Auditing"
53228+depends on GRKERNSEC
53229+
53230+config GRKERNSEC_AUDIT_GROUP
53231+ bool "Single group for auditing"
53232+ help
53233+ If you say Y here, the exec, chdir, and (un)mount logging features
53234+ will only operate on a group you specify. This option is recommended
53235+ if you only want to watch certain users instead of having a large
53236+ amount of logs from the entire system. If the sysctl option is enabled,
53237+ a sysctl option with name "audit_group" is created.
53238+
53239+config GRKERNSEC_AUDIT_GID
53240+ int "GID for auditing"
53241+ depends on GRKERNSEC_AUDIT_GROUP
53242+ default 1007
53243+
53244+config GRKERNSEC_EXECLOG
53245+ bool "Exec logging"
53246+ help
53247+ If you say Y here, all execve() calls will be logged (since the
53248+ other exec*() calls are frontends to execve(), all execution
53249+ will be logged). Useful for shell-servers that like to keep track
53250+ of their users. If the sysctl option is enabled, a sysctl option with
53251+ name "exec_logging" is created.
53252+ WARNING: This option when enabled will produce a LOT of logs, especially
53253+ on an active system.
53254+
53255+config GRKERNSEC_RESLOG
53256+ bool "Resource logging"
53257+ help
53258+ If you say Y here, all attempts to overstep resource limits will
53259+ be logged with the resource name, the requested size, and the current
53260+ limit. It is highly recommended that you say Y here. If the sysctl
53261+ option is enabled, a sysctl option with name "resource_logging" is
53262+ created. If the RBAC system is enabled, the sysctl value is ignored.
53263+
53264+config GRKERNSEC_CHROOT_EXECLOG
53265+ bool "Log execs within chroot"
53266+ help
53267+ If you say Y here, all executions inside a chroot jail will be logged
53268+ to syslog. This can cause a large amount of logs if certain
53269+ applications (eg. djb's daemontools) are installed on the system, and
53270+ is therefore left as an option. If the sysctl option is enabled, a
53271+ sysctl option with name "chroot_execlog" is created.
53272+
53273+config GRKERNSEC_AUDIT_PTRACE
53274+ bool "Ptrace logging"
53275+ help
53276+ If you say Y here, all attempts to attach to a process via ptrace
53277+ will be logged. If the sysctl option is enabled, a sysctl option
53278+ with name "audit_ptrace" is created.
53279+
53280+config GRKERNSEC_AUDIT_CHDIR
53281+ bool "Chdir logging"
53282+ help
53283+ If you say Y here, all chdir() calls will be logged. If the sysctl
53284+ option is enabled, a sysctl option with name "audit_chdir" is created.
53285+
53286+config GRKERNSEC_AUDIT_MOUNT
53287+ bool "(Un)Mount logging"
53288+ help
53289+ If you say Y here, all mounts and unmounts will be logged. If the
53290+ sysctl option is enabled, a sysctl option with name "audit_mount" is
53291+ created.
53292+
53293+config GRKERNSEC_SIGNAL
53294+ bool "Signal logging"
53295+ help
53296+ If you say Y here, certain important signals will be logged, such as
53297+ SIGSEGV, which will as a result inform you of when an error in a program
53298+ occurred, which in some cases could mean a possible exploit attempt.
53299+ If the sysctl option is enabled, a sysctl option with name
53300+ "signal_logging" is created.
53301+
53302+config GRKERNSEC_FORKFAIL
53303+ bool "Fork failure logging"
53304+ help
53305+ If you say Y here, all failed fork() attempts will be logged.
53306+ This could suggest a fork bomb, or someone attempting to overstep
53307+ their process limit. If the sysctl option is enabled, a sysctl option
53308+ with name "forkfail_logging" is created.
53309+
53310+config GRKERNSEC_TIME
53311+ bool "Time change logging"
53312+ help
53313+ If you say Y here, any changes of the system clock will be logged.
53314+ If the sysctl option is enabled, a sysctl option with name
53315+ "timechange_logging" is created.
53316+
53317+config GRKERNSEC_PROC_IPADDR
53318+ bool "/proc/<pid>/ipaddr support"
53319+ help
53320+ If you say Y here, a new entry will be added to each /proc/<pid>
53321+ directory that contains the IP address of the person using the task.
53322+ The IP is carried across local TCP and AF_UNIX stream sockets.
53323+ This information can be useful for IDS/IPSes to perform remote response
53324+ to a local attack. The entry is readable by only the owner of the
53325+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
53326+ the RBAC system), and thus does not create privacy concerns.
53327+
53328+config GRKERNSEC_RWXMAP_LOG
53329+ bool 'Denied RWX mmap/mprotect logging'
53330+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
53331+ help
53332+ If you say Y here, calls to mmap() and mprotect() with explicit
53333+ usage of PROT_WRITE and PROT_EXEC together will be logged when
53334+ denied by the PAX_MPROTECT feature. If the sysctl option is
53335+ enabled, a sysctl option with name "rwxmap_logging" is created.
53336+
53337+config GRKERNSEC_AUDIT_TEXTREL
53338+ bool 'ELF text relocations logging (READ HELP)'
53339+ depends on PAX_MPROTECT
53340+ help
53341+ If you say Y here, text relocations will be logged with the filename
53342+ of the offending library or binary. The purpose of the feature is
53343+ to help Linux distribution developers get rid of libraries and
53344+ binaries that need text relocations which hinder the future progress
53345+ of PaX. Only Linux distribution developers should say Y here, and
53346+ never on a production machine, as this option creates an information
53347+ leak that could aid an attacker in defeating the randomization of
53348+ a single memory region. If the sysctl option is enabled, a sysctl
53349+ option with name "audit_textrel" is created.
53350+
53351+endmenu
53352+
53353+menu "Executable Protections"
53354+depends on GRKERNSEC
53355+
53356+config GRKERNSEC_DMESG
53357+ bool "Dmesg(8) restriction"
53358+ help
53359+ If you say Y here, non-root users will not be able to use dmesg(8)
53360+ to view up to the last 4kb of messages in the kernel's log buffer.
53361+ The kernel's log buffer often contains kernel addresses and other
53362+ identifying information useful to an attacker in fingerprinting a
53363+ system for a targeted exploit.
53364+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
53365+ created.
53366+
53367+config GRKERNSEC_HARDEN_PTRACE
53368+ bool "Deter ptrace-based process snooping"
53369+ help
53370+ If you say Y here, TTY sniffers and other malicious monitoring
53371+ programs implemented through ptrace will be defeated. If you
53372+ have been using the RBAC system, this option has already been
53373+ enabled for several years for all users, with the ability to make
53374+ fine-grained exceptions.
53375+
53376+ This option only affects the ability of non-root users to ptrace
53377+ processes that are not a descendent of the ptracing process.
53378+ This means that strace ./binary and gdb ./binary will still work,
53379+ but attaching to arbitrary processes will not. If the sysctl
53380+ option is enabled, a sysctl option with name "harden_ptrace" is
53381+ created.
53382+
53383+config GRKERNSEC_TPE
53384+ bool "Trusted Path Execution (TPE)"
53385+ help
53386+ If you say Y here, you will be able to choose a gid to add to the
53387+ supplementary groups of users you want to mark as "untrusted."
53388+ These users will not be able to execute any files that are not in
53389+ root-owned directories writable only by root. If the sysctl option
53390+ is enabled, a sysctl option with name "tpe" is created.
53391+
53392+config GRKERNSEC_TPE_ALL
53393+ bool "Partially restrict all non-root users"
53394+ depends on GRKERNSEC_TPE
53395+ help
53396+ If you say Y here, all non-root users will be covered under
53397+ a weaker TPE restriction. This is separate from, and in addition to,
53398+ the main TPE options that you have selected elsewhere. Thus, if a
53399+ "trusted" GID is chosen, this restriction applies to even that GID.
53400+ Under this restriction, all non-root users will only be allowed to
53401+ execute files in directories they own that are not group or
53402+ world-writable, or in directories owned by root and writable only by
53403+ root. If the sysctl option is enabled, a sysctl option with name
53404+ "tpe_restrict_all" is created.
53405+
53406+config GRKERNSEC_TPE_INVERT
53407+ bool "Invert GID option"
53408+ depends on GRKERNSEC_TPE
53409+ help
53410+ If you say Y here, the group you specify in the TPE configuration will
53411+ decide what group TPE restrictions will be *disabled* for. This
53412+ option is useful if you want TPE restrictions to be applied to most
53413+ users on the system. If the sysctl option is enabled, a sysctl option
53414+ with name "tpe_invert" is created. Unlike other sysctl options, this
53415+ entry will default to on for backward-compatibility.
53416+
53417+config GRKERNSEC_TPE_GID
53418+ int "GID for untrusted users"
53419+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
53420+ default 1005
53421+ help
53422+ Setting this GID determines what group TPE restrictions will be
53423+ *enabled* for. If the sysctl option is enabled, a sysctl option
53424+ with name "tpe_gid" is created.
53425+
53426+config GRKERNSEC_TPE_GID
53427+ int "GID for trusted users"
53428+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
53429+ default 1005
53430+ help
53431+ Setting this GID determines what group TPE restrictions will be
53432+ *disabled* for. If the sysctl option is enabled, a sysctl option
53433+ with name "tpe_gid" is created.
53434+
53435+endmenu
53436+menu "Network Protections"
53437+depends on GRKERNSEC
53438+
53439+config GRKERNSEC_RANDNET
53440+ bool "Larger entropy pools"
53441+ help
53442+ If you say Y here, the entropy pools used for many features of Linux
53443+ and grsecurity will be doubled in size. Since several grsecurity
53444+ features use additional randomness, it is recommended that you say Y
53445+ here. Saying Y here has a similar effect as modifying
53446+ /proc/sys/kernel/random/poolsize.
53447+
53448+config GRKERNSEC_BLACKHOLE
53449+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
53450+ depends on NET
53451+ help
53452+ If you say Y here, neither TCP resets nor ICMP
53453+ destination-unreachable packets will be sent in response to packets
53454+ sent to ports for which no associated listening process exists.
53455+ This feature supports both IPV4 and IPV6 and exempts the
53456+ loopback interface from blackholing. Enabling this feature
53457+ makes a host more resilient to DoS attacks and reduces network
53458+ visibility against scanners.
53459+
53460+ The blackhole feature as-implemented is equivalent to the FreeBSD
53461+ blackhole feature, as it prevents RST responses to all packets, not
53462+ just SYNs. Under most application behavior this causes no
53463+ problems, but applications (like haproxy) may not close certain
53464+ connections in a way that cleanly terminates them on the remote
53465+ end, leaving the remote host in LAST_ACK state. Because of this
53466+ side-effect and to prevent intentional LAST_ACK DoSes, this
53467+ feature also adds automatic mitigation against such attacks.
53468+ The mitigation drastically reduces the amount of time a socket
53469+ can spend in LAST_ACK state. If you're using haproxy and not
53470+ all servers it connects to have this option enabled, consider
53471+ disabling this feature on the haproxy host.
53472+
53473+ If the sysctl option is enabled, two sysctl options with names
53474+ "ip_blackhole" and "lastack_retries" will be created.
53475+ While "ip_blackhole" takes the standard zero/non-zero on/off
53476+ toggle, "lastack_retries" uses the same kinds of values as
53477+ "tcp_retries1" and "tcp_retries2". The default value of 4
53478+ prevents a socket from lasting more than 45 seconds in LAST_ACK
53479+ state.
53480+
53481+config GRKERNSEC_SOCKET
53482+ bool "Socket restrictions"
53483+ depends on NET
53484+ help
53485+ If you say Y here, you will be able to choose from several options.
53486+ If you assign a GID on your system and add it to the supplementary
53487+ groups of users you want to restrict socket access to, this patch
53488+ will perform up to three things, based on the option(s) you choose.
53489+
53490+config GRKERNSEC_SOCKET_ALL
53491+ bool "Deny any sockets to group"
53492+ depends on GRKERNSEC_SOCKET
53493+ help
53494+ If you say Y here, you will be able to choose a GID of whose users will
53495+ be unable to connect to other hosts from your machine or run server
53496+ applications from your machine. If the sysctl option is enabled, a
53497+ sysctl option with name "socket_all" is created.
53498+
53499+config GRKERNSEC_SOCKET_ALL_GID
53500+ int "GID to deny all sockets for"
53501+ depends on GRKERNSEC_SOCKET_ALL
53502+ default 1004
53503+ help
53504+ Here you can choose the GID to disable socket access for. Remember to
53505+ add the users you want socket access disabled for to the GID
53506+ specified here. If the sysctl option is enabled, a sysctl option
53507+ with name "socket_all_gid" is created.
53508+
53509+config GRKERNSEC_SOCKET_CLIENT
53510+ bool "Deny client sockets to group"
53511+ depends on GRKERNSEC_SOCKET
53512+ help
53513+ If you say Y here, you will be able to choose a GID of whose users will
53514+ be unable to connect to other hosts from your machine, but will be
53515+ able to run servers. If this option is enabled, all users in the group
53516+ you specify will have to use passive mode when initiating ftp transfers
53517+ from the shell on your machine. If the sysctl option is enabled, a
53518+ sysctl option with name "socket_client" is created.
53519+
53520+config GRKERNSEC_SOCKET_CLIENT_GID
53521+ int "GID to deny client sockets for"
53522+ depends on GRKERNSEC_SOCKET_CLIENT
53523+ default 1003
53524+ help
53525+ Here you can choose the GID to disable client socket access for.
53526+ Remember to add the users you want client socket access disabled for to
53527+ the GID specified here. If the sysctl option is enabled, a sysctl
53528+ option with name "socket_client_gid" is created.
53529+
53530+config GRKERNSEC_SOCKET_SERVER
53531+ bool "Deny server sockets to group"
53532+ depends on GRKERNSEC_SOCKET
53533+ help
53534+ If you say Y here, you will be able to choose a GID of whose users will
53535+ be unable to run server applications from your machine. If the sysctl
53536+ option is enabled, a sysctl option with name "socket_server" is created.
53537+
53538+config GRKERNSEC_SOCKET_SERVER_GID
53539+ int "GID to deny server sockets for"
53540+ depends on GRKERNSEC_SOCKET_SERVER
53541+ default 1002
53542+ help
53543+ Here you can choose the GID to disable server socket access for.
53544+ Remember to add the users you want server socket access disabled for to
53545+ the GID specified here. If the sysctl option is enabled, a sysctl
53546+ option with name "socket_server_gid" is created.
53547+
53548+endmenu
53549+menu "Sysctl support"
53550+depends on GRKERNSEC && SYSCTL
53551+
53552+config GRKERNSEC_SYSCTL
53553+ bool "Sysctl support"
53554+ help
53555+ If you say Y here, you will be able to change the options that
53556+ grsecurity runs with at bootup, without having to recompile your
53557+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
53558+ to enable (1) or disable (0) various features. All the sysctl entries
53559+ are mutable until the "grsec_lock" entry is set to a non-zero value.
53560+ All features enabled in the kernel configuration are disabled at boot
53561+ if you do not say Y to the "Turn on features by default" option.
53562+ All options should be set at startup, and the grsec_lock entry should
53563+ be set to a non-zero value after all the options are set.
53564+ *THIS IS EXTREMELY IMPORTANT*
53565+
53566+config GRKERNSEC_SYSCTL_DISTRO
53567+ bool "Extra sysctl support for distro makers (READ HELP)"
53568+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
53569+ help
53570+ If you say Y here, additional sysctl options will be created
53571+ for features that affect processes running as root. Therefore,
53572+ it is critical when using this option that the grsec_lock entry be
53573+ enabled after boot. Only distros with prebuilt kernel packages
53574+ with this option enabled that can ensure grsec_lock is enabled
53575+ after boot should use this option.
53576+ *Failure to set grsec_lock after boot makes all grsec features
53577+ this option covers useless*
53578+
53579+ Currently this option creates the following sysctl entries:
53580+ "Disable Privileged I/O": "disable_priv_io"
53581+
53582+config GRKERNSEC_SYSCTL_ON
53583+ bool "Turn on features by default"
53584+ depends on GRKERNSEC_SYSCTL
53585+ help
53586+ If you say Y here, instead of having all features enabled in the
53587+ kernel configuration disabled at boot time, the features will be
53588+ enabled at boot time. It is recommended you say Y here unless
53589+ there is some reason you would want all sysctl-tunable features to
53590+ be disabled by default. As mentioned elsewhere, it is important
53591+ to enable the grsec_lock entry once you have finished modifying
53592+ the sysctl entries.
53593+
53594+endmenu
53595+menu "Logging Options"
53596+depends on GRKERNSEC
53597+
53598+config GRKERNSEC_FLOODTIME
53599+ int "Seconds in between log messages (minimum)"
53600+ default 10
53601+ help
53602+ This option allows you to enforce the number of seconds between
53603+ grsecurity log messages. The default should be suitable for most
53604+ people, however, if you choose to change it, choose a value small enough
53605+ to allow informative logs to be produced, but large enough to
53606+ prevent flooding.
53607+
53608+config GRKERNSEC_FLOODBURST
53609+ int "Number of messages in a burst (maximum)"
53610+ default 4
53611+ help
53612+ This option allows you to choose the maximum number of messages allowed
53613+ within the flood time interval you chose in a separate option. The
53614+ default should be suitable for most people, however if you find that
53615+ many of your logs are being interpreted as flooding, you may want to
53616+ raise this value.
53617+
53618+endmenu
53619+
53620+endmenu
53621diff -urNp linux-3.0.4/grsecurity/Makefile linux-3.0.4/grsecurity/Makefile
53622--- linux-3.0.4/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
53623+++ linux-3.0.4/grsecurity/Makefile 2011-08-23 21:48:14.000000000 -0400
53624@@ -0,0 +1,34 @@
53625+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
53626+# during 2001-2009 it has been completely redesigned by Brad Spengler
53627+# into an RBAC system
53628+#
53629+# All code in this directory and various hooks inserted throughout the kernel
53630+# are copyright Brad Spengler - Open Source Security, Inc., and released
53631+# under the GPL v2 or higher
53632+
53633+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
53634+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
53635+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
53636+
53637+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
53638+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
53639+ gracl_learn.o grsec_log.o
53640+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
53641+
53642+ifdef CONFIG_NET
53643+obj-y += grsec_sock.o
53644+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
53645+endif
53646+
53647+ifndef CONFIG_GRKERNSEC
53648+obj-y += grsec_disabled.o
53649+endif
53650+
53651+ifdef CONFIG_GRKERNSEC_HIDESYM
53652+extra-y := grsec_hidesym.o
53653+$(obj)/grsec_hidesym.o:
53654+ @-chmod -f 500 /boot
53655+ @-chmod -f 500 /lib/modules
53656+ @-chmod -f 700 .
53657+ @echo ' grsec: protected kernel image paths'
53658+endif
53659diff -urNp linux-3.0.4/include/acpi/acpi_bus.h linux-3.0.4/include/acpi/acpi_bus.h
53660--- linux-3.0.4/include/acpi/acpi_bus.h 2011-07-21 22:17:23.000000000 -0400
53661+++ linux-3.0.4/include/acpi/acpi_bus.h 2011-08-23 21:47:56.000000000 -0400
53662@@ -107,7 +107,7 @@ struct acpi_device_ops {
53663 acpi_op_bind bind;
53664 acpi_op_unbind unbind;
53665 acpi_op_notify notify;
53666-};
53667+} __no_const;
53668
53669 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
53670
53671diff -urNp linux-3.0.4/include/asm-generic/atomic-long.h linux-3.0.4/include/asm-generic/atomic-long.h
53672--- linux-3.0.4/include/asm-generic/atomic-long.h 2011-07-21 22:17:23.000000000 -0400
53673+++ linux-3.0.4/include/asm-generic/atomic-long.h 2011-08-23 21:47:56.000000000 -0400
53674@@ -22,6 +22,12 @@
53675
53676 typedef atomic64_t atomic_long_t;
53677
53678+#ifdef CONFIG_PAX_REFCOUNT
53679+typedef atomic64_unchecked_t atomic_long_unchecked_t;
53680+#else
53681+typedef atomic64_t atomic_long_unchecked_t;
53682+#endif
53683+
53684 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
53685
53686 static inline long atomic_long_read(atomic_long_t *l)
53687@@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
53688 return (long)atomic64_read(v);
53689 }
53690
53691+#ifdef CONFIG_PAX_REFCOUNT
53692+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
53693+{
53694+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53695+
53696+ return (long)atomic64_read_unchecked(v);
53697+}
53698+#endif
53699+
53700 static inline void atomic_long_set(atomic_long_t *l, long i)
53701 {
53702 atomic64_t *v = (atomic64_t *)l;
53703@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
53704 atomic64_set(v, i);
53705 }
53706
53707+#ifdef CONFIG_PAX_REFCOUNT
53708+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
53709+{
53710+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53711+
53712+ atomic64_set_unchecked(v, i);
53713+}
53714+#endif
53715+
53716 static inline void atomic_long_inc(atomic_long_t *l)
53717 {
53718 atomic64_t *v = (atomic64_t *)l;
53719@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
53720 atomic64_inc(v);
53721 }
53722
53723+#ifdef CONFIG_PAX_REFCOUNT
53724+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
53725+{
53726+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53727+
53728+ atomic64_inc_unchecked(v);
53729+}
53730+#endif
53731+
53732 static inline void atomic_long_dec(atomic_long_t *l)
53733 {
53734 atomic64_t *v = (atomic64_t *)l;
53735@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
53736 atomic64_dec(v);
53737 }
53738
53739+#ifdef CONFIG_PAX_REFCOUNT
53740+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
53741+{
53742+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53743+
53744+ atomic64_dec_unchecked(v);
53745+}
53746+#endif
53747+
53748 static inline void atomic_long_add(long i, atomic_long_t *l)
53749 {
53750 atomic64_t *v = (atomic64_t *)l;
53751@@ -59,6 +101,15 @@ static inline void atomic_long_add(long
53752 atomic64_add(i, v);
53753 }
53754
53755+#ifdef CONFIG_PAX_REFCOUNT
53756+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
53757+{
53758+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53759+
53760+ atomic64_add_unchecked(i, v);
53761+}
53762+#endif
53763+
53764 static inline void atomic_long_sub(long i, atomic_long_t *l)
53765 {
53766 atomic64_t *v = (atomic64_t *)l;
53767@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long
53768 atomic64_sub(i, v);
53769 }
53770
53771+#ifdef CONFIG_PAX_REFCOUNT
53772+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
53773+{
53774+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53775+
53776+ atomic64_sub_unchecked(i, v);
53777+}
53778+#endif
53779+
53780 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
53781 {
53782 atomic64_t *v = (atomic64_t *)l;
53783@@ -115,6 +175,15 @@ static inline long atomic_long_inc_retur
53784 return (long)atomic64_inc_return(v);
53785 }
53786
53787+#ifdef CONFIG_PAX_REFCOUNT
53788+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
53789+{
53790+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53791+
53792+ return (long)atomic64_inc_return_unchecked(v);
53793+}
53794+#endif
53795+
53796 static inline long atomic_long_dec_return(atomic_long_t *l)
53797 {
53798 atomic64_t *v = (atomic64_t *)l;
53799@@ -140,6 +209,12 @@ static inline long atomic_long_add_unles
53800
53801 typedef atomic_t atomic_long_t;
53802
53803+#ifdef CONFIG_PAX_REFCOUNT
53804+typedef atomic_unchecked_t atomic_long_unchecked_t;
53805+#else
53806+typedef atomic_t atomic_long_unchecked_t;
53807+#endif
53808+
53809 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
53810 static inline long atomic_long_read(atomic_long_t *l)
53811 {
53812@@ -148,6 +223,15 @@ static inline long atomic_long_read(atom
53813 return (long)atomic_read(v);
53814 }
53815
53816+#ifdef CONFIG_PAX_REFCOUNT
53817+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
53818+{
53819+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53820+
53821+ return (long)atomic_read_unchecked(v);
53822+}
53823+#endif
53824+
53825 static inline void atomic_long_set(atomic_long_t *l, long i)
53826 {
53827 atomic_t *v = (atomic_t *)l;
53828@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomi
53829 atomic_set(v, i);
53830 }
53831
53832+#ifdef CONFIG_PAX_REFCOUNT
53833+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
53834+{
53835+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53836+
53837+ atomic_set_unchecked(v, i);
53838+}
53839+#endif
53840+
53841 static inline void atomic_long_inc(atomic_long_t *l)
53842 {
53843 atomic_t *v = (atomic_t *)l;
53844@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomi
53845 atomic_inc(v);
53846 }
53847
53848+#ifdef CONFIG_PAX_REFCOUNT
53849+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
53850+{
53851+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53852+
53853+ atomic_inc_unchecked(v);
53854+}
53855+#endif
53856+
53857 static inline void atomic_long_dec(atomic_long_t *l)
53858 {
53859 atomic_t *v = (atomic_t *)l;
53860@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomi
53861 atomic_dec(v);
53862 }
53863
53864+#ifdef CONFIG_PAX_REFCOUNT
53865+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
53866+{
53867+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53868+
53869+ atomic_dec_unchecked(v);
53870+}
53871+#endif
53872+
53873 static inline void atomic_long_add(long i, atomic_long_t *l)
53874 {
53875 atomic_t *v = (atomic_t *)l;
53876@@ -176,6 +287,15 @@ static inline void atomic_long_add(long
53877 atomic_add(i, v);
53878 }
53879
53880+#ifdef CONFIG_PAX_REFCOUNT
53881+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
53882+{
53883+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53884+
53885+ atomic_add_unchecked(i, v);
53886+}
53887+#endif
53888+
53889 static inline void atomic_long_sub(long i, atomic_long_t *l)
53890 {
53891 atomic_t *v = (atomic_t *)l;
53892@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long
53893 atomic_sub(i, v);
53894 }
53895
53896+#ifdef CONFIG_PAX_REFCOUNT
53897+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
53898+{
53899+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53900+
53901+ atomic_sub_unchecked(i, v);
53902+}
53903+#endif
53904+
53905 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
53906 {
53907 atomic_t *v = (atomic_t *)l;
53908@@ -232,6 +361,15 @@ static inline long atomic_long_inc_retur
53909 return (long)atomic_inc_return(v);
53910 }
53911
53912+#ifdef CONFIG_PAX_REFCOUNT
53913+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
53914+{
53915+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53916+
53917+ return (long)atomic_inc_return_unchecked(v);
53918+}
53919+#endif
53920+
53921 static inline long atomic_long_dec_return(atomic_long_t *l)
53922 {
53923 atomic_t *v = (atomic_t *)l;
53924@@ -255,4 +393,49 @@ static inline long atomic_long_add_unles
53925
53926 #endif /* BITS_PER_LONG == 64 */
53927
53928+#ifdef CONFIG_PAX_REFCOUNT
53929+static inline void pax_refcount_needs_these_functions(void)
53930+{
53931+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
53932+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
53933+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
53934+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
53935+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
53936+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
53937+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
53938+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
53939+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
53940+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
53941+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
53942+
53943+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
53944+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
53945+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
53946+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
53947+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
53948+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
53949+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
53950+}
53951+#else
53952+#define atomic_read_unchecked(v) atomic_read(v)
53953+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
53954+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
53955+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
53956+#define atomic_inc_unchecked(v) atomic_inc(v)
53957+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
53958+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
53959+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
53960+#define atomic_dec_unchecked(v) atomic_dec(v)
53961+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
53962+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
53963+
53964+#define atomic_long_read_unchecked(v) atomic_long_read(v)
53965+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
53966+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
53967+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
53968+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
53969+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
53970+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
53971+#endif
53972+
53973 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
53974diff -urNp linux-3.0.4/include/asm-generic/cache.h linux-3.0.4/include/asm-generic/cache.h
53975--- linux-3.0.4/include/asm-generic/cache.h 2011-07-21 22:17:23.000000000 -0400
53976+++ linux-3.0.4/include/asm-generic/cache.h 2011-08-23 21:47:56.000000000 -0400
53977@@ -6,7 +6,7 @@
53978 * cache lines need to provide their own cache.h.
53979 */
53980
53981-#define L1_CACHE_SHIFT 5
53982-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
53983+#define L1_CACHE_SHIFT 5UL
53984+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
53985
53986 #endif /* __ASM_GENERIC_CACHE_H */
53987diff -urNp linux-3.0.4/include/asm-generic/int-l64.h linux-3.0.4/include/asm-generic/int-l64.h
53988--- linux-3.0.4/include/asm-generic/int-l64.h 2011-07-21 22:17:23.000000000 -0400
53989+++ linux-3.0.4/include/asm-generic/int-l64.h 2011-08-23 21:47:56.000000000 -0400
53990@@ -46,6 +46,8 @@ typedef unsigned int u32;
53991 typedef signed long s64;
53992 typedef unsigned long u64;
53993
53994+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
53995+
53996 #define S8_C(x) x
53997 #define U8_C(x) x ## U
53998 #define S16_C(x) x
53999diff -urNp linux-3.0.4/include/asm-generic/int-ll64.h linux-3.0.4/include/asm-generic/int-ll64.h
54000--- linux-3.0.4/include/asm-generic/int-ll64.h 2011-07-21 22:17:23.000000000 -0400
54001+++ linux-3.0.4/include/asm-generic/int-ll64.h 2011-08-23 21:47:56.000000000 -0400
54002@@ -51,6 +51,8 @@ typedef unsigned int u32;
54003 typedef signed long long s64;
54004 typedef unsigned long long u64;
54005
54006+typedef unsigned long long intoverflow_t;
54007+
54008 #define S8_C(x) x
54009 #define U8_C(x) x ## U
54010 #define S16_C(x) x
54011diff -urNp linux-3.0.4/include/asm-generic/kmap_types.h linux-3.0.4/include/asm-generic/kmap_types.h
54012--- linux-3.0.4/include/asm-generic/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
54013+++ linux-3.0.4/include/asm-generic/kmap_types.h 2011-08-23 21:47:56.000000000 -0400
54014@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
54015 KMAP_D(17) KM_NMI,
54016 KMAP_D(18) KM_NMI_PTE,
54017 KMAP_D(19) KM_KDB,
54018+KMAP_D(20) KM_CLEARPAGE,
54019 /*
54020 * Remember to update debug_kmap_atomic() when adding new kmap types!
54021 */
54022-KMAP_D(20) KM_TYPE_NR
54023+KMAP_D(21) KM_TYPE_NR
54024 };
54025
54026 #undef KMAP_D
54027diff -urNp linux-3.0.4/include/asm-generic/pgtable.h linux-3.0.4/include/asm-generic/pgtable.h
54028--- linux-3.0.4/include/asm-generic/pgtable.h 2011-07-21 22:17:23.000000000 -0400
54029+++ linux-3.0.4/include/asm-generic/pgtable.h 2011-08-23 21:47:56.000000000 -0400
54030@@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
54031 #endif /* __HAVE_ARCH_PMD_WRITE */
54032 #endif
54033
54034+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
54035+static inline unsigned long pax_open_kernel(void) { return 0; }
54036+#endif
54037+
54038+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
54039+static inline unsigned long pax_close_kernel(void) { return 0; }
54040+#endif
54041+
54042 #endif /* !__ASSEMBLY__ */
54043
54044 #endif /* _ASM_GENERIC_PGTABLE_H */
54045diff -urNp linux-3.0.4/include/asm-generic/pgtable-nopmd.h linux-3.0.4/include/asm-generic/pgtable-nopmd.h
54046--- linux-3.0.4/include/asm-generic/pgtable-nopmd.h 2011-07-21 22:17:23.000000000 -0400
54047+++ linux-3.0.4/include/asm-generic/pgtable-nopmd.h 2011-08-23 21:47:56.000000000 -0400
54048@@ -1,14 +1,19 @@
54049 #ifndef _PGTABLE_NOPMD_H
54050 #define _PGTABLE_NOPMD_H
54051
54052-#ifndef __ASSEMBLY__
54053-
54054 #include <asm-generic/pgtable-nopud.h>
54055
54056-struct mm_struct;
54057-
54058 #define __PAGETABLE_PMD_FOLDED
54059
54060+#define PMD_SHIFT PUD_SHIFT
54061+#define PTRS_PER_PMD 1
54062+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
54063+#define PMD_MASK (~(PMD_SIZE-1))
54064+
54065+#ifndef __ASSEMBLY__
54066+
54067+struct mm_struct;
54068+
54069 /*
54070 * Having the pmd type consist of a pud gets the size right, and allows
54071 * us to conceptually access the pud entry that this pmd is folded into
54072@@ -16,11 +21,6 @@ struct mm_struct;
54073 */
54074 typedef struct { pud_t pud; } pmd_t;
54075
54076-#define PMD_SHIFT PUD_SHIFT
54077-#define PTRS_PER_PMD 1
54078-#define PMD_SIZE (1UL << PMD_SHIFT)
54079-#define PMD_MASK (~(PMD_SIZE-1))
54080-
54081 /*
54082 * The "pud_xxx()" functions here are trivial for a folded two-level
54083 * setup: the pmd is never bad, and a pmd always exists (as it's folded
54084diff -urNp linux-3.0.4/include/asm-generic/pgtable-nopud.h linux-3.0.4/include/asm-generic/pgtable-nopud.h
54085--- linux-3.0.4/include/asm-generic/pgtable-nopud.h 2011-07-21 22:17:23.000000000 -0400
54086+++ linux-3.0.4/include/asm-generic/pgtable-nopud.h 2011-08-23 21:47:56.000000000 -0400
54087@@ -1,10 +1,15 @@
54088 #ifndef _PGTABLE_NOPUD_H
54089 #define _PGTABLE_NOPUD_H
54090
54091-#ifndef __ASSEMBLY__
54092-
54093 #define __PAGETABLE_PUD_FOLDED
54094
54095+#define PUD_SHIFT PGDIR_SHIFT
54096+#define PTRS_PER_PUD 1
54097+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
54098+#define PUD_MASK (~(PUD_SIZE-1))
54099+
54100+#ifndef __ASSEMBLY__
54101+
54102 /*
54103 * Having the pud type consist of a pgd gets the size right, and allows
54104 * us to conceptually access the pgd entry that this pud is folded into
54105@@ -12,11 +17,6 @@
54106 */
54107 typedef struct { pgd_t pgd; } pud_t;
54108
54109-#define PUD_SHIFT PGDIR_SHIFT
54110-#define PTRS_PER_PUD 1
54111-#define PUD_SIZE (1UL << PUD_SHIFT)
54112-#define PUD_MASK (~(PUD_SIZE-1))
54113-
54114 /*
54115 * The "pgd_xxx()" functions here are trivial for a folded two-level
54116 * setup: the pud is never bad, and a pud always exists (as it's folded
54117diff -urNp linux-3.0.4/include/asm-generic/vmlinux.lds.h linux-3.0.4/include/asm-generic/vmlinux.lds.h
54118--- linux-3.0.4/include/asm-generic/vmlinux.lds.h 2011-07-21 22:17:23.000000000 -0400
54119+++ linux-3.0.4/include/asm-generic/vmlinux.lds.h 2011-08-23 21:47:56.000000000 -0400
54120@@ -217,6 +217,7 @@
54121 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
54122 VMLINUX_SYMBOL(__start_rodata) = .; \
54123 *(.rodata) *(.rodata.*) \
54124+ *(.data..read_only) \
54125 *(__vermagic) /* Kernel version magic */ \
54126 . = ALIGN(8); \
54127 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
54128@@ -723,17 +724,18 @@
54129 * section in the linker script will go there too. @phdr should have
54130 * a leading colon.
54131 *
54132- * Note that this macros defines __per_cpu_load as an absolute symbol.
54133+ * Note that this macros defines per_cpu_load as an absolute symbol.
54134 * If there is no need to put the percpu section at a predetermined
54135 * address, use PERCPU_SECTION.
54136 */
54137 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
54138- VMLINUX_SYMBOL(__per_cpu_load) = .; \
54139- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
54140+ per_cpu_load = .; \
54141+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
54142 - LOAD_OFFSET) { \
54143+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
54144 PERCPU_INPUT(cacheline) \
54145 } phdr \
54146- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
54147+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
54148
54149 /**
54150 * PERCPU_SECTION - define output section for percpu area, simple version
54151diff -urNp linux-3.0.4/include/drm/drm_crtc_helper.h linux-3.0.4/include/drm/drm_crtc_helper.h
54152--- linux-3.0.4/include/drm/drm_crtc_helper.h 2011-07-21 22:17:23.000000000 -0400
54153+++ linux-3.0.4/include/drm/drm_crtc_helper.h 2011-08-23 21:47:56.000000000 -0400
54154@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
54155
54156 /* disable crtc when not in use - more explicit than dpms off */
54157 void (*disable)(struct drm_crtc *crtc);
54158-};
54159+} __no_const;
54160
54161 struct drm_encoder_helper_funcs {
54162 void (*dpms)(struct drm_encoder *encoder, int mode);
54163@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
54164 struct drm_connector *connector);
54165 /* disable encoder when not in use - more explicit than dpms off */
54166 void (*disable)(struct drm_encoder *encoder);
54167-};
54168+} __no_const;
54169
54170 struct drm_connector_helper_funcs {
54171 int (*get_modes)(struct drm_connector *connector);
54172diff -urNp linux-3.0.4/include/drm/drmP.h linux-3.0.4/include/drm/drmP.h
54173--- linux-3.0.4/include/drm/drmP.h 2011-07-21 22:17:23.000000000 -0400
54174+++ linux-3.0.4/include/drm/drmP.h 2011-08-23 21:47:56.000000000 -0400
54175@@ -73,6 +73,7 @@
54176 #include <linux/workqueue.h>
54177 #include <linux/poll.h>
54178 #include <asm/pgalloc.h>
54179+#include <asm/local.h>
54180 #include "drm.h"
54181
54182 #include <linux/idr.h>
54183@@ -1033,7 +1034,7 @@ struct drm_device {
54184
54185 /** \name Usage Counters */
54186 /*@{ */
54187- int open_count; /**< Outstanding files open */
54188+ local_t open_count; /**< Outstanding files open */
54189 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
54190 atomic_t vma_count; /**< Outstanding vma areas open */
54191 int buf_use; /**< Buffers in use -- cannot alloc */
54192@@ -1044,7 +1045,7 @@ struct drm_device {
54193 /*@{ */
54194 unsigned long counters;
54195 enum drm_stat_type types[15];
54196- atomic_t counts[15];
54197+ atomic_unchecked_t counts[15];
54198 /*@} */
54199
54200 struct list_head filelist;
54201diff -urNp linux-3.0.4/include/drm/ttm/ttm_memory.h linux-3.0.4/include/drm/ttm/ttm_memory.h
54202--- linux-3.0.4/include/drm/ttm/ttm_memory.h 2011-07-21 22:17:23.000000000 -0400
54203+++ linux-3.0.4/include/drm/ttm/ttm_memory.h 2011-08-23 21:47:56.000000000 -0400
54204@@ -47,7 +47,7 @@
54205
54206 struct ttm_mem_shrink {
54207 int (*do_shrink) (struct ttm_mem_shrink *);
54208-};
54209+} __no_const;
54210
54211 /**
54212 * struct ttm_mem_global - Global memory accounting structure.
54213diff -urNp linux-3.0.4/include/linux/a.out.h linux-3.0.4/include/linux/a.out.h
54214--- linux-3.0.4/include/linux/a.out.h 2011-07-21 22:17:23.000000000 -0400
54215+++ linux-3.0.4/include/linux/a.out.h 2011-08-23 21:47:56.000000000 -0400
54216@@ -39,6 +39,14 @@ enum machine_type {
54217 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
54218 };
54219
54220+/* Constants for the N_FLAGS field */
54221+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
54222+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
54223+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
54224+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
54225+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
54226+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
54227+
54228 #if !defined (N_MAGIC)
54229 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
54230 #endif
54231diff -urNp linux-3.0.4/include/linux/atmdev.h linux-3.0.4/include/linux/atmdev.h
54232--- linux-3.0.4/include/linux/atmdev.h 2011-07-21 22:17:23.000000000 -0400
54233+++ linux-3.0.4/include/linux/atmdev.h 2011-08-23 21:47:56.000000000 -0400
54234@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
54235 #endif
54236
54237 struct k_atm_aal_stats {
54238-#define __HANDLE_ITEM(i) atomic_t i
54239+#define __HANDLE_ITEM(i) atomic_unchecked_t i
54240 __AAL_STAT_ITEMS
54241 #undef __HANDLE_ITEM
54242 };
54243diff -urNp linux-3.0.4/include/linux/binfmts.h linux-3.0.4/include/linux/binfmts.h
54244--- linux-3.0.4/include/linux/binfmts.h 2011-07-21 22:17:23.000000000 -0400
54245+++ linux-3.0.4/include/linux/binfmts.h 2011-08-23 21:47:56.000000000 -0400
54246@@ -88,6 +88,7 @@ struct linux_binfmt {
54247 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
54248 int (*load_shlib)(struct file *);
54249 int (*core_dump)(struct coredump_params *cprm);
54250+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
54251 unsigned long min_coredump; /* minimal dump size */
54252 };
54253
54254diff -urNp linux-3.0.4/include/linux/blkdev.h linux-3.0.4/include/linux/blkdev.h
54255--- linux-3.0.4/include/linux/blkdev.h 2011-07-21 22:17:23.000000000 -0400
54256+++ linux-3.0.4/include/linux/blkdev.h 2011-08-26 19:49:56.000000000 -0400
54257@@ -1308,7 +1308,7 @@ struct block_device_operations {
54258 /* this callback is with swap_lock and sometimes page table lock held */
54259 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
54260 struct module *owner;
54261-};
54262+} __do_const;
54263
54264 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
54265 unsigned long);
54266diff -urNp linux-3.0.4/include/linux/blktrace_api.h linux-3.0.4/include/linux/blktrace_api.h
54267--- linux-3.0.4/include/linux/blktrace_api.h 2011-07-21 22:17:23.000000000 -0400
54268+++ linux-3.0.4/include/linux/blktrace_api.h 2011-08-23 21:47:56.000000000 -0400
54269@@ -161,7 +161,7 @@ struct blk_trace {
54270 struct dentry *dir;
54271 struct dentry *dropped_file;
54272 struct dentry *msg_file;
54273- atomic_t dropped;
54274+ atomic_unchecked_t dropped;
54275 };
54276
54277 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
54278diff -urNp linux-3.0.4/include/linux/byteorder/little_endian.h linux-3.0.4/include/linux/byteorder/little_endian.h
54279--- linux-3.0.4/include/linux/byteorder/little_endian.h 2011-07-21 22:17:23.000000000 -0400
54280+++ linux-3.0.4/include/linux/byteorder/little_endian.h 2011-08-23 21:47:56.000000000 -0400
54281@@ -42,51 +42,51 @@
54282
54283 static inline __le64 __cpu_to_le64p(const __u64 *p)
54284 {
54285- return (__force __le64)*p;
54286+ return (__force const __le64)*p;
54287 }
54288 static inline __u64 __le64_to_cpup(const __le64 *p)
54289 {
54290- return (__force __u64)*p;
54291+ return (__force const __u64)*p;
54292 }
54293 static inline __le32 __cpu_to_le32p(const __u32 *p)
54294 {
54295- return (__force __le32)*p;
54296+ return (__force const __le32)*p;
54297 }
54298 static inline __u32 __le32_to_cpup(const __le32 *p)
54299 {
54300- return (__force __u32)*p;
54301+ return (__force const __u32)*p;
54302 }
54303 static inline __le16 __cpu_to_le16p(const __u16 *p)
54304 {
54305- return (__force __le16)*p;
54306+ return (__force const __le16)*p;
54307 }
54308 static inline __u16 __le16_to_cpup(const __le16 *p)
54309 {
54310- return (__force __u16)*p;
54311+ return (__force const __u16)*p;
54312 }
54313 static inline __be64 __cpu_to_be64p(const __u64 *p)
54314 {
54315- return (__force __be64)__swab64p(p);
54316+ return (__force const __be64)__swab64p(p);
54317 }
54318 static inline __u64 __be64_to_cpup(const __be64 *p)
54319 {
54320- return __swab64p((__u64 *)p);
54321+ return __swab64p((const __u64 *)p);
54322 }
54323 static inline __be32 __cpu_to_be32p(const __u32 *p)
54324 {
54325- return (__force __be32)__swab32p(p);
54326+ return (__force const __be32)__swab32p(p);
54327 }
54328 static inline __u32 __be32_to_cpup(const __be32 *p)
54329 {
54330- return __swab32p((__u32 *)p);
54331+ return __swab32p((const __u32 *)p);
54332 }
54333 static inline __be16 __cpu_to_be16p(const __u16 *p)
54334 {
54335- return (__force __be16)__swab16p(p);
54336+ return (__force const __be16)__swab16p(p);
54337 }
54338 static inline __u16 __be16_to_cpup(const __be16 *p)
54339 {
54340- return __swab16p((__u16 *)p);
54341+ return __swab16p((const __u16 *)p);
54342 }
54343 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
54344 #define __le64_to_cpus(x) do { (void)(x); } while (0)
54345diff -urNp linux-3.0.4/include/linux/cache.h linux-3.0.4/include/linux/cache.h
54346--- linux-3.0.4/include/linux/cache.h 2011-07-21 22:17:23.000000000 -0400
54347+++ linux-3.0.4/include/linux/cache.h 2011-08-23 21:47:56.000000000 -0400
54348@@ -16,6 +16,10 @@
54349 #define __read_mostly
54350 #endif
54351
54352+#ifndef __read_only
54353+#define __read_only __read_mostly
54354+#endif
54355+
54356 #ifndef ____cacheline_aligned
54357 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
54358 #endif
54359diff -urNp linux-3.0.4/include/linux/capability.h linux-3.0.4/include/linux/capability.h
54360--- linux-3.0.4/include/linux/capability.h 2011-07-21 22:17:23.000000000 -0400
54361+++ linux-3.0.4/include/linux/capability.h 2011-08-23 21:48:14.000000000 -0400
54362@@ -547,6 +547,9 @@ extern bool capable(int cap);
54363 extern bool ns_capable(struct user_namespace *ns, int cap);
54364 extern bool task_ns_capable(struct task_struct *t, int cap);
54365 extern bool nsown_capable(int cap);
54366+extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
54367+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
54368+extern bool capable_nolog(int cap);
54369
54370 /* audit system wants to get cap info from files as well */
54371 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
54372diff -urNp linux-3.0.4/include/linux/cleancache.h linux-3.0.4/include/linux/cleancache.h
54373--- linux-3.0.4/include/linux/cleancache.h 2011-07-21 22:17:23.000000000 -0400
54374+++ linux-3.0.4/include/linux/cleancache.h 2011-08-23 21:47:56.000000000 -0400
54375@@ -31,7 +31,7 @@ struct cleancache_ops {
54376 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
54377 void (*flush_inode)(int, struct cleancache_filekey);
54378 void (*flush_fs)(int);
54379-};
54380+} __no_const;
54381
54382 extern struct cleancache_ops
54383 cleancache_register_ops(struct cleancache_ops *ops);
54384diff -urNp linux-3.0.4/include/linux/compiler-gcc4.h linux-3.0.4/include/linux/compiler-gcc4.h
54385--- linux-3.0.4/include/linux/compiler-gcc4.h 2011-07-21 22:17:23.000000000 -0400
54386+++ linux-3.0.4/include/linux/compiler-gcc4.h 2011-08-26 19:49:56.000000000 -0400
54387@@ -31,6 +31,12 @@
54388
54389
54390 #if __GNUC_MINOR__ >= 5
54391+
54392+#ifdef CONSTIFY_PLUGIN
54393+#define __no_const __attribute__((no_const))
54394+#define __do_const __attribute__((do_const))
54395+#endif
54396+
54397 /*
54398 * Mark a position in code as unreachable. This can be used to
54399 * suppress control flow warnings after asm blocks that transfer
54400@@ -46,6 +52,11 @@
54401 #define __noclone __attribute__((__noclone__))
54402
54403 #endif
54404+
54405+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
54406+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
54407+#define __bos0(ptr) __bos((ptr), 0)
54408+#define __bos1(ptr) __bos((ptr), 1)
54409 #endif
54410
54411 #if __GNUC_MINOR__ > 0
54412diff -urNp linux-3.0.4/include/linux/compiler.h linux-3.0.4/include/linux/compiler.h
54413--- linux-3.0.4/include/linux/compiler.h 2011-07-21 22:17:23.000000000 -0400
54414+++ linux-3.0.4/include/linux/compiler.h 2011-08-26 19:49:56.000000000 -0400
54415@@ -264,6 +264,14 @@ void ftrace_likely_update(struct ftrace_
54416 # define __attribute_const__ /* unimplemented */
54417 #endif
54418
54419+#ifndef __no_const
54420+# define __no_const
54421+#endif
54422+
54423+#ifndef __do_const
54424+# define __do_const
54425+#endif
54426+
54427 /*
54428 * Tell gcc if a function is cold. The compiler will assume any path
54429 * directly leading to the call is unlikely.
54430@@ -273,6 +281,22 @@ void ftrace_likely_update(struct ftrace_
54431 #define __cold
54432 #endif
54433
54434+#ifndef __alloc_size
54435+#define __alloc_size(...)
54436+#endif
54437+
54438+#ifndef __bos
54439+#define __bos(ptr, arg)
54440+#endif
54441+
54442+#ifndef __bos0
54443+#define __bos0(ptr)
54444+#endif
54445+
54446+#ifndef __bos1
54447+#define __bos1(ptr)
54448+#endif
54449+
54450 /* Simple shorthand for a section definition */
54451 #ifndef __section
54452 # define __section(S) __attribute__ ((__section__(#S)))
54453@@ -306,6 +330,7 @@ void ftrace_likely_update(struct ftrace_
54454 * use is to mediate communication between process-level code and irq/NMI
54455 * handlers, all running on the same CPU.
54456 */
54457-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
54458+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
54459+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
54460
54461 #endif /* __LINUX_COMPILER_H */
54462diff -urNp linux-3.0.4/include/linux/cpuset.h linux-3.0.4/include/linux/cpuset.h
54463--- linux-3.0.4/include/linux/cpuset.h 2011-07-21 22:17:23.000000000 -0400
54464+++ linux-3.0.4/include/linux/cpuset.h 2011-08-23 21:47:56.000000000 -0400
54465@@ -118,7 +118,7 @@ static inline void put_mems_allowed(void
54466 * nodemask.
54467 */
54468 smp_mb();
54469- --ACCESS_ONCE(current->mems_allowed_change_disable);
54470+ --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
54471 }
54472
54473 static inline void set_mems_allowed(nodemask_t nodemask)
54474diff -urNp linux-3.0.4/include/linux/crypto.h linux-3.0.4/include/linux/crypto.h
54475--- linux-3.0.4/include/linux/crypto.h 2011-07-21 22:17:23.000000000 -0400
54476+++ linux-3.0.4/include/linux/crypto.h 2011-08-23 21:47:56.000000000 -0400
54477@@ -361,7 +361,7 @@ struct cipher_tfm {
54478 const u8 *key, unsigned int keylen);
54479 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
54480 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
54481-};
54482+} __no_const;
54483
54484 struct hash_tfm {
54485 int (*init)(struct hash_desc *desc);
54486@@ -382,13 +382,13 @@ struct compress_tfm {
54487 int (*cot_decompress)(struct crypto_tfm *tfm,
54488 const u8 *src, unsigned int slen,
54489 u8 *dst, unsigned int *dlen);
54490-};
54491+} __no_const;
54492
54493 struct rng_tfm {
54494 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
54495 unsigned int dlen);
54496 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
54497-};
54498+} __no_const;
54499
54500 #define crt_ablkcipher crt_u.ablkcipher
54501 #define crt_aead crt_u.aead
54502diff -urNp linux-3.0.4/include/linux/decompress/mm.h linux-3.0.4/include/linux/decompress/mm.h
54503--- linux-3.0.4/include/linux/decompress/mm.h 2011-07-21 22:17:23.000000000 -0400
54504+++ linux-3.0.4/include/linux/decompress/mm.h 2011-08-23 21:47:56.000000000 -0400
54505@@ -77,7 +77,7 @@ static void free(void *where)
54506 * warnings when not needed (indeed large_malloc / large_free are not
54507 * needed by inflate */
54508
54509-#define malloc(a) kmalloc(a, GFP_KERNEL)
54510+#define malloc(a) kmalloc((a), GFP_KERNEL)
54511 #define free(a) kfree(a)
54512
54513 #define large_malloc(a) vmalloc(a)
54514diff -urNp linux-3.0.4/include/linux/dma-mapping.h linux-3.0.4/include/linux/dma-mapping.h
54515--- linux-3.0.4/include/linux/dma-mapping.h 2011-07-21 22:17:23.000000000 -0400
54516+++ linux-3.0.4/include/linux/dma-mapping.h 2011-08-26 19:49:56.000000000 -0400
54517@@ -50,7 +50,7 @@ struct dma_map_ops {
54518 int (*dma_supported)(struct device *dev, u64 mask);
54519 int (*set_dma_mask)(struct device *dev, u64 mask);
54520 int is_phys;
54521-};
54522+} __do_const;
54523
54524 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
54525
54526diff -urNp linux-3.0.4/include/linux/efi.h linux-3.0.4/include/linux/efi.h
54527--- linux-3.0.4/include/linux/efi.h 2011-07-21 22:17:23.000000000 -0400
54528+++ linux-3.0.4/include/linux/efi.h 2011-08-23 21:47:56.000000000 -0400
54529@@ -410,7 +410,7 @@ struct efivar_operations {
54530 efi_get_variable_t *get_variable;
54531 efi_get_next_variable_t *get_next_variable;
54532 efi_set_variable_t *set_variable;
54533-};
54534+} __no_const;
54535
54536 struct efivars {
54537 /*
54538diff -urNp linux-3.0.4/include/linux/elf.h linux-3.0.4/include/linux/elf.h
54539--- linux-3.0.4/include/linux/elf.h 2011-07-21 22:17:23.000000000 -0400
54540+++ linux-3.0.4/include/linux/elf.h 2011-08-23 21:47:56.000000000 -0400
54541@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
54542 #define PT_GNU_EH_FRAME 0x6474e550
54543
54544 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
54545+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
54546+
54547+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
54548+
54549+/* Constants for the e_flags field */
54550+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
54551+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
54552+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
54553+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
54554+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
54555+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
54556
54557 /*
54558 * Extended Numbering
54559@@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
54560 #define DT_DEBUG 21
54561 #define DT_TEXTREL 22
54562 #define DT_JMPREL 23
54563+#define DT_FLAGS 30
54564+ #define DF_TEXTREL 0x00000004
54565 #define DT_ENCODING 32
54566 #define OLD_DT_LOOS 0x60000000
54567 #define DT_LOOS 0x6000000d
54568@@ -252,6 +265,19 @@ typedef struct elf64_hdr {
54569 #define PF_W 0x2
54570 #define PF_X 0x1
54571
54572+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
54573+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
54574+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
54575+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
54576+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
54577+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
54578+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
54579+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
54580+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
54581+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
54582+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
54583+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
54584+
54585 typedef struct elf32_phdr{
54586 Elf32_Word p_type;
54587 Elf32_Off p_offset;
54588@@ -344,6 +370,8 @@ typedef struct elf64_shdr {
54589 #define EI_OSABI 7
54590 #define EI_PAD 8
54591
54592+#define EI_PAX 14
54593+
54594 #define ELFMAG0 0x7f /* EI_MAG */
54595 #define ELFMAG1 'E'
54596 #define ELFMAG2 'L'
54597@@ -422,6 +450,7 @@ extern Elf32_Dyn _DYNAMIC [];
54598 #define elf_note elf32_note
54599 #define elf_addr_t Elf32_Off
54600 #define Elf_Half Elf32_Half
54601+#define elf_dyn Elf32_Dyn
54602
54603 #else
54604
54605@@ -432,6 +461,7 @@ extern Elf64_Dyn _DYNAMIC [];
54606 #define elf_note elf64_note
54607 #define elf_addr_t Elf64_Off
54608 #define Elf_Half Elf64_Half
54609+#define elf_dyn Elf64_Dyn
54610
54611 #endif
54612
54613diff -urNp linux-3.0.4/include/linux/firewire.h linux-3.0.4/include/linux/firewire.h
54614--- linux-3.0.4/include/linux/firewire.h 2011-07-21 22:17:23.000000000 -0400
54615+++ linux-3.0.4/include/linux/firewire.h 2011-08-23 21:47:56.000000000 -0400
54616@@ -428,7 +428,7 @@ struct fw_iso_context {
54617 union {
54618 fw_iso_callback_t sc;
54619 fw_iso_mc_callback_t mc;
54620- } callback;
54621+ } __no_const callback;
54622 void *callback_data;
54623 };
54624
54625diff -urNp linux-3.0.4/include/linux/fscache-cache.h linux-3.0.4/include/linux/fscache-cache.h
54626--- linux-3.0.4/include/linux/fscache-cache.h 2011-07-21 22:17:23.000000000 -0400
54627+++ linux-3.0.4/include/linux/fscache-cache.h 2011-08-23 21:47:56.000000000 -0400
54628@@ -102,7 +102,7 @@ struct fscache_operation {
54629 fscache_operation_release_t release;
54630 };
54631
54632-extern atomic_t fscache_op_debug_id;
54633+extern atomic_unchecked_t fscache_op_debug_id;
54634 extern void fscache_op_work_func(struct work_struct *work);
54635
54636 extern void fscache_enqueue_operation(struct fscache_operation *);
54637@@ -122,7 +122,7 @@ static inline void fscache_operation_ini
54638 {
54639 INIT_WORK(&op->work, fscache_op_work_func);
54640 atomic_set(&op->usage, 1);
54641- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
54642+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
54643 op->processor = processor;
54644 op->release = release;
54645 INIT_LIST_HEAD(&op->pend_link);
54646diff -urNp linux-3.0.4/include/linux/fs.h linux-3.0.4/include/linux/fs.h
54647--- linux-3.0.4/include/linux/fs.h 2011-07-21 22:17:23.000000000 -0400
54648+++ linux-3.0.4/include/linux/fs.h 2011-08-26 19:49:56.000000000 -0400
54649@@ -109,6 +109,11 @@ struct inodes_stat_t {
54650 /* File was opened by fanotify and shouldn't generate fanotify events */
54651 #define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
54652
54653+/* Hack for grsec so as not to require read permission simply to execute
54654+ * a binary
54655+ */
54656+#define FMODE_GREXEC ((__force fmode_t)0x2000000)
54657+
54658 /*
54659 * The below are the various read and write types that we support. Some of
54660 * them include behavioral modifiers that send information down to the
54661@@ -1571,7 +1576,8 @@ struct file_operations {
54662 int (*setlease)(struct file *, long, struct file_lock **);
54663 long (*fallocate)(struct file *file, int mode, loff_t offset,
54664 loff_t len);
54665-};
54666+} __do_const;
54667+typedef struct file_operations __no_const file_operations_no_const;
54668
54669 #define IPERM_FLAG_RCU 0x0001
54670
54671diff -urNp linux-3.0.4/include/linux/fsnotify.h linux-3.0.4/include/linux/fsnotify.h
54672--- linux-3.0.4/include/linux/fsnotify.h 2011-07-21 22:17:23.000000000 -0400
54673+++ linux-3.0.4/include/linux/fsnotify.h 2011-08-24 18:10:29.000000000 -0400
54674@@ -314,7 +314,7 @@ static inline void fsnotify_change(struc
54675 */
54676 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
54677 {
54678- return kstrdup(name, GFP_KERNEL);
54679+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
54680 }
54681
54682 /*
54683diff -urNp linux-3.0.4/include/linux/fs_struct.h linux-3.0.4/include/linux/fs_struct.h
54684--- linux-3.0.4/include/linux/fs_struct.h 2011-07-21 22:17:23.000000000 -0400
54685+++ linux-3.0.4/include/linux/fs_struct.h 2011-08-23 21:47:56.000000000 -0400
54686@@ -6,7 +6,7 @@
54687 #include <linux/seqlock.h>
54688
54689 struct fs_struct {
54690- int users;
54691+ atomic_t users;
54692 spinlock_t lock;
54693 seqcount_t seq;
54694 int umask;
54695diff -urNp linux-3.0.4/include/linux/ftrace_event.h linux-3.0.4/include/linux/ftrace_event.h
54696--- linux-3.0.4/include/linux/ftrace_event.h 2011-07-21 22:17:23.000000000 -0400
54697+++ linux-3.0.4/include/linux/ftrace_event.h 2011-08-23 21:47:56.000000000 -0400
54698@@ -96,7 +96,7 @@ struct trace_event_functions {
54699 trace_print_func raw;
54700 trace_print_func hex;
54701 trace_print_func binary;
54702-};
54703+} __no_const;
54704
54705 struct trace_event {
54706 struct hlist_node node;
54707@@ -247,7 +247,7 @@ extern int trace_define_field(struct ftr
54708 extern int trace_add_event_call(struct ftrace_event_call *call);
54709 extern void trace_remove_event_call(struct ftrace_event_call *call);
54710
54711-#define is_signed_type(type) (((type)(-1)) < 0)
54712+#define is_signed_type(type) (((type)(-1)) < (type)1)
54713
54714 int trace_set_clr_event(const char *system, const char *event, int set);
54715
54716diff -urNp linux-3.0.4/include/linux/genhd.h linux-3.0.4/include/linux/genhd.h
54717--- linux-3.0.4/include/linux/genhd.h 2011-07-21 22:17:23.000000000 -0400
54718+++ linux-3.0.4/include/linux/genhd.h 2011-08-23 21:47:56.000000000 -0400
54719@@ -184,7 +184,7 @@ struct gendisk {
54720 struct kobject *slave_dir;
54721
54722 struct timer_rand_state *random;
54723- atomic_t sync_io; /* RAID */
54724+ atomic_unchecked_t sync_io; /* RAID */
54725 struct disk_events *ev;
54726 #ifdef CONFIG_BLK_DEV_INTEGRITY
54727 struct blk_integrity *integrity;
54728diff -urNp linux-3.0.4/include/linux/gracl.h linux-3.0.4/include/linux/gracl.h
54729--- linux-3.0.4/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
54730+++ linux-3.0.4/include/linux/gracl.h 2011-08-23 21:48:14.000000000 -0400
54731@@ -0,0 +1,317 @@
54732+#ifndef GR_ACL_H
54733+#define GR_ACL_H
54734+
54735+#include <linux/grdefs.h>
54736+#include <linux/resource.h>
54737+#include <linux/capability.h>
54738+#include <linux/dcache.h>
54739+#include <asm/resource.h>
54740+
54741+/* Major status information */
54742+
54743+#define GR_VERSION "grsecurity 2.2.2"
54744+#define GRSECURITY_VERSION 0x2202
54745+
54746+enum {
54747+ GR_SHUTDOWN = 0,
54748+ GR_ENABLE = 1,
54749+ GR_SPROLE = 2,
54750+ GR_RELOAD = 3,
54751+ GR_SEGVMOD = 4,
54752+ GR_STATUS = 5,
54753+ GR_UNSPROLE = 6,
54754+ GR_PASSSET = 7,
54755+ GR_SPROLEPAM = 8,
54756+};
54757+
54758+/* Password setup definitions
54759+ * kernel/grhash.c */
54760+enum {
54761+ GR_PW_LEN = 128,
54762+ GR_SALT_LEN = 16,
54763+ GR_SHA_LEN = 32,
54764+};
54765+
54766+enum {
54767+ GR_SPROLE_LEN = 64,
54768+};
54769+
54770+enum {
54771+ GR_NO_GLOB = 0,
54772+ GR_REG_GLOB,
54773+ GR_CREATE_GLOB
54774+};
54775+
54776+#define GR_NLIMITS 32
54777+
54778+/* Begin Data Structures */
54779+
54780+struct sprole_pw {
54781+ unsigned char *rolename;
54782+ unsigned char salt[GR_SALT_LEN];
54783+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
54784+};
54785+
54786+struct name_entry {
54787+ __u32 key;
54788+ ino_t inode;
54789+ dev_t device;
54790+ char *name;
54791+ __u16 len;
54792+ __u8 deleted;
54793+ struct name_entry *prev;
54794+ struct name_entry *next;
54795+};
54796+
54797+struct inodev_entry {
54798+ struct name_entry *nentry;
54799+ struct inodev_entry *prev;
54800+ struct inodev_entry *next;
54801+};
54802+
54803+struct acl_role_db {
54804+ struct acl_role_label **r_hash;
54805+ __u32 r_size;
54806+};
54807+
54808+struct inodev_db {
54809+ struct inodev_entry **i_hash;
54810+ __u32 i_size;
54811+};
54812+
54813+struct name_db {
54814+ struct name_entry **n_hash;
54815+ __u32 n_size;
54816+};
54817+
54818+struct crash_uid {
54819+ uid_t uid;
54820+ unsigned long expires;
54821+};
54822+
54823+struct gr_hash_struct {
54824+ void **table;
54825+ void **nametable;
54826+ void *first;
54827+ __u32 table_size;
54828+ __u32 used_size;
54829+ int type;
54830+};
54831+
54832+/* Userspace Grsecurity ACL data structures */
54833+
54834+struct acl_subject_label {
54835+ char *filename;
54836+ ino_t inode;
54837+ dev_t device;
54838+ __u32 mode;
54839+ kernel_cap_t cap_mask;
54840+ kernel_cap_t cap_lower;
54841+ kernel_cap_t cap_invert_audit;
54842+
54843+ struct rlimit res[GR_NLIMITS];
54844+ __u32 resmask;
54845+
54846+ __u8 user_trans_type;
54847+ __u8 group_trans_type;
54848+ uid_t *user_transitions;
54849+ gid_t *group_transitions;
54850+ __u16 user_trans_num;
54851+ __u16 group_trans_num;
54852+
54853+ __u32 sock_families[2];
54854+ __u32 ip_proto[8];
54855+ __u32 ip_type;
54856+ struct acl_ip_label **ips;
54857+ __u32 ip_num;
54858+ __u32 inaddr_any_override;
54859+
54860+ __u32 crashes;
54861+ unsigned long expires;
54862+
54863+ struct acl_subject_label *parent_subject;
54864+ struct gr_hash_struct *hash;
54865+ struct acl_subject_label *prev;
54866+ struct acl_subject_label *next;
54867+
54868+ struct acl_object_label **obj_hash;
54869+ __u32 obj_hash_size;
54870+ __u16 pax_flags;
54871+};
54872+
54873+struct role_allowed_ip {
54874+ __u32 addr;
54875+ __u32 netmask;
54876+
54877+ struct role_allowed_ip *prev;
54878+ struct role_allowed_ip *next;
54879+};
54880+
54881+struct role_transition {
54882+ char *rolename;
54883+
54884+ struct role_transition *prev;
54885+ struct role_transition *next;
54886+};
54887+
54888+struct acl_role_label {
54889+ char *rolename;
54890+ uid_t uidgid;
54891+ __u16 roletype;
54892+
54893+ __u16 auth_attempts;
54894+ unsigned long expires;
54895+
54896+ struct acl_subject_label *root_label;
54897+ struct gr_hash_struct *hash;
54898+
54899+ struct acl_role_label *prev;
54900+ struct acl_role_label *next;
54901+
54902+ struct role_transition *transitions;
54903+ struct role_allowed_ip *allowed_ips;
54904+ uid_t *domain_children;
54905+ __u16 domain_child_num;
54906+
54907+ struct acl_subject_label **subj_hash;
54908+ __u32 subj_hash_size;
54909+};
54910+
54911+struct user_acl_role_db {
54912+ struct acl_role_label **r_table;
54913+ __u32 num_pointers; /* Number of allocations to track */
54914+ __u32 num_roles; /* Number of roles */
54915+ __u32 num_domain_children; /* Number of domain children */
54916+ __u32 num_subjects; /* Number of subjects */
54917+ __u32 num_objects; /* Number of objects */
54918+};
54919+
54920+struct acl_object_label {
54921+ char *filename;
54922+ ino_t inode;
54923+ dev_t device;
54924+ __u32 mode;
54925+
54926+ struct acl_subject_label *nested;
54927+ struct acl_object_label *globbed;
54928+
54929+ /* next two structures not used */
54930+
54931+ struct acl_object_label *prev;
54932+ struct acl_object_label *next;
54933+};
54934+
54935+struct acl_ip_label {
54936+ char *iface;
54937+ __u32 addr;
54938+ __u32 netmask;
54939+ __u16 low, high;
54940+ __u8 mode;
54941+ __u32 type;
54942+ __u32 proto[8];
54943+
54944+ /* next two structures not used */
54945+
54946+ struct acl_ip_label *prev;
54947+ struct acl_ip_label *next;
54948+};
54949+
54950+struct gr_arg {
54951+ struct user_acl_role_db role_db;
54952+ unsigned char pw[GR_PW_LEN];
54953+ unsigned char salt[GR_SALT_LEN];
54954+ unsigned char sum[GR_SHA_LEN];
54955+ unsigned char sp_role[GR_SPROLE_LEN];
54956+ struct sprole_pw *sprole_pws;
54957+ dev_t segv_device;
54958+ ino_t segv_inode;
54959+ uid_t segv_uid;
54960+ __u16 num_sprole_pws;
54961+ __u16 mode;
54962+};
54963+
54964+struct gr_arg_wrapper {
54965+ struct gr_arg *arg;
54966+ __u32 version;
54967+ __u32 size;
54968+};
54969+
54970+struct subject_map {
54971+ struct acl_subject_label *user;
54972+ struct acl_subject_label *kernel;
54973+ struct subject_map *prev;
54974+ struct subject_map *next;
54975+};
54976+
54977+struct acl_subj_map_db {
54978+ struct subject_map **s_hash;
54979+ __u32 s_size;
54980+};
54981+
54982+/* End Data Structures Section */
54983+
54984+/* Hash functions generated by empirical testing by Brad Spengler
54985+ Makes good use of the low bits of the inode. Generally 0-1 times
54986+ in loop for successful match. 0-3 for unsuccessful match.
54987+ Shift/add algorithm with modulus of table size and an XOR*/
54988+
54989+static __inline__ unsigned int
54990+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
54991+{
54992+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
54993+}
54994+
54995+ static __inline__ unsigned int
54996+shash(const struct acl_subject_label *userp, const unsigned int sz)
54997+{
54998+ return ((const unsigned long)userp % sz);
54999+}
55000+
55001+static __inline__ unsigned int
55002+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
55003+{
55004+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
55005+}
55006+
55007+static __inline__ unsigned int
55008+nhash(const char *name, const __u16 len, const unsigned int sz)
55009+{
55010+ return full_name_hash((const unsigned char *)name, len) % sz;
55011+}
55012+
55013+#define FOR_EACH_ROLE_START(role) \
55014+ role = role_list; \
55015+ while (role) {
55016+
55017+#define FOR_EACH_ROLE_END(role) \
55018+ role = role->prev; \
55019+ }
55020+
55021+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
55022+ subj = NULL; \
55023+ iter = 0; \
55024+ while (iter < role->subj_hash_size) { \
55025+ if (subj == NULL) \
55026+ subj = role->subj_hash[iter]; \
55027+ if (subj == NULL) { \
55028+ iter++; \
55029+ continue; \
55030+ }
55031+
55032+#define FOR_EACH_SUBJECT_END(subj,iter) \
55033+ subj = subj->next; \
55034+ if (subj == NULL) \
55035+ iter++; \
55036+ }
55037+
55038+
55039+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
55040+ subj = role->hash->first; \
55041+ while (subj != NULL) {
55042+
55043+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
55044+ subj = subj->next; \
55045+ }
55046+
55047+#endif
55048+
55049diff -urNp linux-3.0.4/include/linux/gralloc.h linux-3.0.4/include/linux/gralloc.h
55050--- linux-3.0.4/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
55051+++ linux-3.0.4/include/linux/gralloc.h 2011-08-23 21:48:14.000000000 -0400
55052@@ -0,0 +1,9 @@
55053+#ifndef __GRALLOC_H
55054+#define __GRALLOC_H
55055+
55056+void acl_free_all(void);
55057+int acl_alloc_stack_init(unsigned long size);
55058+void *acl_alloc(unsigned long len);
55059+void *acl_alloc_num(unsigned long num, unsigned long len);
55060+
55061+#endif
55062diff -urNp linux-3.0.4/include/linux/grdefs.h linux-3.0.4/include/linux/grdefs.h
55063--- linux-3.0.4/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
55064+++ linux-3.0.4/include/linux/grdefs.h 2011-08-23 21:48:14.000000000 -0400
55065@@ -0,0 +1,140 @@
55066+#ifndef GRDEFS_H
55067+#define GRDEFS_H
55068+
55069+/* Begin grsecurity status declarations */
55070+
55071+enum {
55072+ GR_READY = 0x01,
55073+ GR_STATUS_INIT = 0x00 // disabled state
55074+};
55075+
55076+/* Begin ACL declarations */
55077+
55078+/* Role flags */
55079+
55080+enum {
55081+ GR_ROLE_USER = 0x0001,
55082+ GR_ROLE_GROUP = 0x0002,
55083+ GR_ROLE_DEFAULT = 0x0004,
55084+ GR_ROLE_SPECIAL = 0x0008,
55085+ GR_ROLE_AUTH = 0x0010,
55086+ GR_ROLE_NOPW = 0x0020,
55087+ GR_ROLE_GOD = 0x0040,
55088+ GR_ROLE_LEARN = 0x0080,
55089+ GR_ROLE_TPE = 0x0100,
55090+ GR_ROLE_DOMAIN = 0x0200,
55091+ GR_ROLE_PAM = 0x0400,
55092+ GR_ROLE_PERSIST = 0x0800
55093+};
55094+
55095+/* ACL Subject and Object mode flags */
55096+enum {
55097+ GR_DELETED = 0x80000000
55098+};
55099+
55100+/* ACL Object-only mode flags */
55101+enum {
55102+ GR_READ = 0x00000001,
55103+ GR_APPEND = 0x00000002,
55104+ GR_WRITE = 0x00000004,
55105+ GR_EXEC = 0x00000008,
55106+ GR_FIND = 0x00000010,
55107+ GR_INHERIT = 0x00000020,
55108+ GR_SETID = 0x00000040,
55109+ GR_CREATE = 0x00000080,
55110+ GR_DELETE = 0x00000100,
55111+ GR_LINK = 0x00000200,
55112+ GR_AUDIT_READ = 0x00000400,
55113+ GR_AUDIT_APPEND = 0x00000800,
55114+ GR_AUDIT_WRITE = 0x00001000,
55115+ GR_AUDIT_EXEC = 0x00002000,
55116+ GR_AUDIT_FIND = 0x00004000,
55117+ GR_AUDIT_INHERIT= 0x00008000,
55118+ GR_AUDIT_SETID = 0x00010000,
55119+ GR_AUDIT_CREATE = 0x00020000,
55120+ GR_AUDIT_DELETE = 0x00040000,
55121+ GR_AUDIT_LINK = 0x00080000,
55122+ GR_PTRACERD = 0x00100000,
55123+ GR_NOPTRACE = 0x00200000,
55124+ GR_SUPPRESS = 0x00400000,
55125+ GR_NOLEARN = 0x00800000,
55126+ GR_INIT_TRANSFER= 0x01000000
55127+};
55128+
55129+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
55130+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
55131+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
55132+
55133+/* ACL subject-only mode flags */
55134+enum {
55135+ GR_KILL = 0x00000001,
55136+ GR_VIEW = 0x00000002,
55137+ GR_PROTECTED = 0x00000004,
55138+ GR_LEARN = 0x00000008,
55139+ GR_OVERRIDE = 0x00000010,
55140+ /* just a placeholder, this mode is only used in userspace */
55141+ GR_DUMMY = 0x00000020,
55142+ GR_PROTSHM = 0x00000040,
55143+ GR_KILLPROC = 0x00000080,
55144+ GR_KILLIPPROC = 0x00000100,
55145+ /* just a placeholder, this mode is only used in userspace */
55146+ GR_NOTROJAN = 0x00000200,
55147+ GR_PROTPROCFD = 0x00000400,
55148+ GR_PROCACCT = 0x00000800,
55149+ GR_RELAXPTRACE = 0x00001000,
55150+ GR_NESTED = 0x00002000,
55151+ GR_INHERITLEARN = 0x00004000,
55152+ GR_PROCFIND = 0x00008000,
55153+ GR_POVERRIDE = 0x00010000,
55154+ GR_KERNELAUTH = 0x00020000,
55155+ GR_ATSECURE = 0x00040000,
55156+ GR_SHMEXEC = 0x00080000
55157+};
55158+
55159+enum {
55160+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
55161+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
55162+ GR_PAX_ENABLE_MPROTECT = 0x0004,
55163+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
55164+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
55165+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
55166+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
55167+ GR_PAX_DISABLE_MPROTECT = 0x0400,
55168+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
55169+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
55170+};
55171+
55172+enum {
55173+ GR_ID_USER = 0x01,
55174+ GR_ID_GROUP = 0x02,
55175+};
55176+
55177+enum {
55178+ GR_ID_ALLOW = 0x01,
55179+ GR_ID_DENY = 0x02,
55180+};
55181+
55182+#define GR_CRASH_RES 31
55183+#define GR_UIDTABLE_MAX 500
55184+
55185+/* begin resource learning section */
55186+enum {
55187+ GR_RLIM_CPU_BUMP = 60,
55188+ GR_RLIM_FSIZE_BUMP = 50000,
55189+ GR_RLIM_DATA_BUMP = 10000,
55190+ GR_RLIM_STACK_BUMP = 1000,
55191+ GR_RLIM_CORE_BUMP = 10000,
55192+ GR_RLIM_RSS_BUMP = 500000,
55193+ GR_RLIM_NPROC_BUMP = 1,
55194+ GR_RLIM_NOFILE_BUMP = 5,
55195+ GR_RLIM_MEMLOCK_BUMP = 50000,
55196+ GR_RLIM_AS_BUMP = 500000,
55197+ GR_RLIM_LOCKS_BUMP = 2,
55198+ GR_RLIM_SIGPENDING_BUMP = 5,
55199+ GR_RLIM_MSGQUEUE_BUMP = 10000,
55200+ GR_RLIM_NICE_BUMP = 1,
55201+ GR_RLIM_RTPRIO_BUMP = 1,
55202+ GR_RLIM_RTTIME_BUMP = 1000000
55203+};
55204+
55205+#endif
55206diff -urNp linux-3.0.4/include/linux/grinternal.h linux-3.0.4/include/linux/grinternal.h
55207--- linux-3.0.4/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
55208+++ linux-3.0.4/include/linux/grinternal.h 2011-08-23 21:48:14.000000000 -0400
55209@@ -0,0 +1,219 @@
55210+#ifndef __GRINTERNAL_H
55211+#define __GRINTERNAL_H
55212+
55213+#ifdef CONFIG_GRKERNSEC
55214+
55215+#include <linux/fs.h>
55216+#include <linux/mnt_namespace.h>
55217+#include <linux/nsproxy.h>
55218+#include <linux/gracl.h>
55219+#include <linux/grdefs.h>
55220+#include <linux/grmsg.h>
55221+
55222+void gr_add_learn_entry(const char *fmt, ...)
55223+ __attribute__ ((format (printf, 1, 2)));
55224+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
55225+ const struct vfsmount *mnt);
55226+__u32 gr_check_create(const struct dentry *new_dentry,
55227+ const struct dentry *parent,
55228+ const struct vfsmount *mnt, const __u32 mode);
55229+int gr_check_protected_task(const struct task_struct *task);
55230+__u32 to_gr_audit(const __u32 reqmode);
55231+int gr_set_acls(const int type);
55232+int gr_apply_subject_to_task(struct task_struct *task);
55233+int gr_acl_is_enabled(void);
55234+char gr_roletype_to_char(void);
55235+
55236+void gr_handle_alertkill(struct task_struct *task);
55237+char *gr_to_filename(const struct dentry *dentry,
55238+ const struct vfsmount *mnt);
55239+char *gr_to_filename1(const struct dentry *dentry,
55240+ const struct vfsmount *mnt);
55241+char *gr_to_filename2(const struct dentry *dentry,
55242+ const struct vfsmount *mnt);
55243+char *gr_to_filename3(const struct dentry *dentry,
55244+ const struct vfsmount *mnt);
55245+
55246+extern int grsec_enable_harden_ptrace;
55247+extern int grsec_enable_link;
55248+extern int grsec_enable_fifo;
55249+extern int grsec_enable_execve;
55250+extern int grsec_enable_shm;
55251+extern int grsec_enable_execlog;
55252+extern int grsec_enable_signal;
55253+extern int grsec_enable_audit_ptrace;
55254+extern int grsec_enable_forkfail;
55255+extern int grsec_enable_time;
55256+extern int grsec_enable_rofs;
55257+extern int grsec_enable_chroot_shmat;
55258+extern int grsec_enable_chroot_mount;
55259+extern int grsec_enable_chroot_double;
55260+extern int grsec_enable_chroot_pivot;
55261+extern int grsec_enable_chroot_chdir;
55262+extern int grsec_enable_chroot_chmod;
55263+extern int grsec_enable_chroot_mknod;
55264+extern int grsec_enable_chroot_fchdir;
55265+extern int grsec_enable_chroot_nice;
55266+extern int grsec_enable_chroot_execlog;
55267+extern int grsec_enable_chroot_caps;
55268+extern int grsec_enable_chroot_sysctl;
55269+extern int grsec_enable_chroot_unix;
55270+extern int grsec_enable_tpe;
55271+extern int grsec_tpe_gid;
55272+extern int grsec_enable_tpe_all;
55273+extern int grsec_enable_tpe_invert;
55274+extern int grsec_enable_socket_all;
55275+extern int grsec_socket_all_gid;
55276+extern int grsec_enable_socket_client;
55277+extern int grsec_socket_client_gid;
55278+extern int grsec_enable_socket_server;
55279+extern int grsec_socket_server_gid;
55280+extern int grsec_audit_gid;
55281+extern int grsec_enable_group;
55282+extern int grsec_enable_audit_textrel;
55283+extern int grsec_enable_log_rwxmaps;
55284+extern int grsec_enable_mount;
55285+extern int grsec_enable_chdir;
55286+extern int grsec_resource_logging;
55287+extern int grsec_enable_blackhole;
55288+extern int grsec_lastack_retries;
55289+extern int grsec_enable_brute;
55290+extern int grsec_lock;
55291+
55292+extern spinlock_t grsec_alert_lock;
55293+extern unsigned long grsec_alert_wtime;
55294+extern unsigned long grsec_alert_fyet;
55295+
55296+extern spinlock_t grsec_audit_lock;
55297+
55298+extern rwlock_t grsec_exec_file_lock;
55299+
55300+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
55301+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
55302+ (tsk)->exec_file->f_vfsmnt) : "/")
55303+
55304+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
55305+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
55306+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
55307+
55308+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
55309+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
55310+ (tsk)->exec_file->f_vfsmnt) : "/")
55311+
55312+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
55313+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
55314+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
55315+
55316+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
55317+
55318+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
55319+
55320+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
55321+ (task)->pid, (cred)->uid, \
55322+ (cred)->euid, (cred)->gid, (cred)->egid, \
55323+ gr_parent_task_fullpath(task), \
55324+ (task)->real_parent->comm, (task)->real_parent->pid, \
55325+ (pcred)->uid, (pcred)->euid, \
55326+ (pcred)->gid, (pcred)->egid
55327+
55328+#define GR_CHROOT_CAPS {{ \
55329+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
55330+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
55331+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
55332+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
55333+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
55334+ CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
55335+
55336+#define security_learn(normal_msg,args...) \
55337+({ \
55338+ read_lock(&grsec_exec_file_lock); \
55339+ gr_add_learn_entry(normal_msg "\n", ## args); \
55340+ read_unlock(&grsec_exec_file_lock); \
55341+})
55342+
55343+enum {
55344+ GR_DO_AUDIT,
55345+ GR_DONT_AUDIT,
55346+ /* used for non-audit messages that we shouldn't kill the task on */
55347+ GR_DONT_AUDIT_GOOD
55348+};
55349+
55350+enum {
55351+ GR_TTYSNIFF,
55352+ GR_RBAC,
55353+ GR_RBAC_STR,
55354+ GR_STR_RBAC,
55355+ GR_RBAC_MODE2,
55356+ GR_RBAC_MODE3,
55357+ GR_FILENAME,
55358+ GR_SYSCTL_HIDDEN,
55359+ GR_NOARGS,
55360+ GR_ONE_INT,
55361+ GR_ONE_INT_TWO_STR,
55362+ GR_ONE_STR,
55363+ GR_STR_INT,
55364+ GR_TWO_STR_INT,
55365+ GR_TWO_INT,
55366+ GR_TWO_U64,
55367+ GR_THREE_INT,
55368+ GR_FIVE_INT_TWO_STR,
55369+ GR_TWO_STR,
55370+ GR_THREE_STR,
55371+ GR_FOUR_STR,
55372+ GR_STR_FILENAME,
55373+ GR_FILENAME_STR,
55374+ GR_FILENAME_TWO_INT,
55375+ GR_FILENAME_TWO_INT_STR,
55376+ GR_TEXTREL,
55377+ GR_PTRACE,
55378+ GR_RESOURCE,
55379+ GR_CAP,
55380+ GR_SIG,
55381+ GR_SIG2,
55382+ GR_CRASH1,
55383+ GR_CRASH2,
55384+ GR_PSACCT,
55385+ GR_RWXMAP
55386+};
55387+
55388+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
55389+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
55390+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
55391+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
55392+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
55393+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
55394+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
55395+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
55396+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
55397+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
55398+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
55399+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
55400+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
55401+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
55402+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
55403+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
55404+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
55405+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
55406+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
55407+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
55408+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
55409+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
55410+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
55411+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
55412+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
55413+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
55414+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
55415+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
55416+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
55417+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
55418+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
55419+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
55420+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
55421+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
55422+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
55423+
55424+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
55425+
55426+#endif
55427+
55428+#endif
55429diff -urNp linux-3.0.4/include/linux/grmsg.h linux-3.0.4/include/linux/grmsg.h
55430--- linux-3.0.4/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
55431+++ linux-3.0.4/include/linux/grmsg.h 2011-08-25 17:27:26.000000000 -0400
55432@@ -0,0 +1,107 @@
55433+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
55434+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
55435+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
55436+#define GR_STOPMOD_MSG "denied modification of module state by "
55437+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
55438+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
55439+#define GR_IOPERM_MSG "denied use of ioperm() by "
55440+#define GR_IOPL_MSG "denied use of iopl() by "
55441+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
55442+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
55443+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
55444+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
55445+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
55446+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
55447+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
55448+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
55449+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
55450+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
55451+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
55452+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
55453+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
55454+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
55455+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
55456+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
55457+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
55458+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
55459+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
55460+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
55461+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
55462+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
55463+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
55464+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
55465+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
55466+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
55467+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
55468+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
55469+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
55470+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
55471+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
55472+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
55473+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
55474+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
55475+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
55476+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
55477+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
55478+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
55479+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
55480+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
55481+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
55482+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
55483+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
55484+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
55485+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
55486+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
55487+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
55488+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
55489+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
55490+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
55491+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
55492+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
55493+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
55494+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
55495+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
55496+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
55497+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
55498+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
55499+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
55500+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
55501+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
55502+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
55503+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
55504+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
55505+#define GR_FAILFORK_MSG "failed fork with errno %s by "
55506+#define GR_NICE_CHROOT_MSG "denied priority change by "
55507+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
55508+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
55509+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
55510+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
55511+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
55512+#define GR_TIME_MSG "time set by "
55513+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
55514+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
55515+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
55516+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
55517+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
55518+#define GR_BIND_MSG "denied bind() by "
55519+#define GR_CONNECT_MSG "denied connect() by "
55520+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
55521+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
55522+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
55523+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
55524+#define GR_CAP_ACL_MSG "use of %s denied for "
55525+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
55526+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
55527+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
55528+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
55529+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
55530+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
55531+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
55532+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
55533+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
55534+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
55535+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
55536+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
55537+#define GR_VM86_MSG "denied use of vm86 by "
55538+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
55539+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
55540diff -urNp linux-3.0.4/include/linux/grsecurity.h linux-3.0.4/include/linux/grsecurity.h
55541--- linux-3.0.4/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
55542+++ linux-3.0.4/include/linux/grsecurity.h 2011-08-25 17:27:36.000000000 -0400
55543@@ -0,0 +1,227 @@
55544+#ifndef GR_SECURITY_H
55545+#define GR_SECURITY_H
55546+#include <linux/fs.h>
55547+#include <linux/fs_struct.h>
55548+#include <linux/binfmts.h>
55549+#include <linux/gracl.h>
55550+
55551+/* notify of brain-dead configs */
55552+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55553+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
55554+#endif
55555+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
55556+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
55557+#endif
55558+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
55559+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
55560+#endif
55561+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
55562+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
55563+#endif
55564+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
55565+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
55566+#endif
55567+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
55568+#error "CONFIG_PAX enabled, but no PaX options are enabled."
55569+#endif
55570+
55571+#include <linux/compat.h>
55572+
55573+struct user_arg_ptr {
55574+#ifdef CONFIG_COMPAT
55575+ bool is_compat;
55576+#endif
55577+ union {
55578+ const char __user *const __user *native;
55579+#ifdef CONFIG_COMPAT
55580+ compat_uptr_t __user *compat;
55581+#endif
55582+ } ptr;
55583+};
55584+
55585+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
55586+void gr_handle_brute_check(void);
55587+void gr_handle_kernel_exploit(void);
55588+int gr_process_user_ban(void);
55589+
55590+char gr_roletype_to_char(void);
55591+
55592+int gr_acl_enable_at_secure(void);
55593+
55594+int gr_check_user_change(int real, int effective, int fs);
55595+int gr_check_group_change(int real, int effective, int fs);
55596+
55597+void gr_del_task_from_ip_table(struct task_struct *p);
55598+
55599+int gr_pid_is_chrooted(struct task_struct *p);
55600+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
55601+int gr_handle_chroot_nice(void);
55602+int gr_handle_chroot_sysctl(const int op);
55603+int gr_handle_chroot_setpriority(struct task_struct *p,
55604+ const int niceval);
55605+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
55606+int gr_handle_chroot_chroot(const struct dentry *dentry,
55607+ const struct vfsmount *mnt);
55608+int gr_handle_chroot_caps(struct path *path);
55609+void gr_handle_chroot_chdir(struct path *path);
55610+int gr_handle_chroot_chmod(const struct dentry *dentry,
55611+ const struct vfsmount *mnt, const int mode);
55612+int gr_handle_chroot_mknod(const struct dentry *dentry,
55613+ const struct vfsmount *mnt, const int mode);
55614+int gr_handle_chroot_mount(const struct dentry *dentry,
55615+ const struct vfsmount *mnt,
55616+ const char *dev_name);
55617+int gr_handle_chroot_pivot(void);
55618+int gr_handle_chroot_unix(const pid_t pid);
55619+
55620+int gr_handle_rawio(const struct inode *inode);
55621+
55622+void gr_handle_ioperm(void);
55623+void gr_handle_iopl(void);
55624+
55625+int gr_tpe_allow(const struct file *file);
55626+
55627+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
55628+void gr_clear_chroot_entries(struct task_struct *task);
55629+
55630+void gr_log_forkfail(const int retval);
55631+void gr_log_timechange(void);
55632+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
55633+void gr_log_chdir(const struct dentry *dentry,
55634+ const struct vfsmount *mnt);
55635+void gr_log_chroot_exec(const struct dentry *dentry,
55636+ const struct vfsmount *mnt);
55637+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
55638+void gr_log_remount(const char *devname, const int retval);
55639+void gr_log_unmount(const char *devname, const int retval);
55640+void gr_log_mount(const char *from, const char *to, const int retval);
55641+void gr_log_textrel(struct vm_area_struct *vma);
55642+void gr_log_rwxmmap(struct file *file);
55643+void gr_log_rwxmprotect(struct file *file);
55644+
55645+int gr_handle_follow_link(const struct inode *parent,
55646+ const struct inode *inode,
55647+ const struct dentry *dentry,
55648+ const struct vfsmount *mnt);
55649+int gr_handle_fifo(const struct dentry *dentry,
55650+ const struct vfsmount *mnt,
55651+ const struct dentry *dir, const int flag,
55652+ const int acc_mode);
55653+int gr_handle_hardlink(const struct dentry *dentry,
55654+ const struct vfsmount *mnt,
55655+ struct inode *inode,
55656+ const int mode, const char *to);
55657+
55658+int gr_is_capable(const int cap);
55659+int gr_is_capable_nolog(const int cap);
55660+void gr_learn_resource(const struct task_struct *task, const int limit,
55661+ const unsigned long wanted, const int gt);
55662+void gr_copy_label(struct task_struct *tsk);
55663+void gr_handle_crash(struct task_struct *task, const int sig);
55664+int gr_handle_signal(const struct task_struct *p, const int sig);
55665+int gr_check_crash_uid(const uid_t uid);
55666+int gr_check_protected_task(const struct task_struct *task);
55667+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
55668+int gr_acl_handle_mmap(const struct file *file,
55669+ const unsigned long prot);
55670+int gr_acl_handle_mprotect(const struct file *file,
55671+ const unsigned long prot);
55672+int gr_check_hidden_task(const struct task_struct *tsk);
55673+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
55674+ const struct vfsmount *mnt);
55675+__u32 gr_acl_handle_utime(const struct dentry *dentry,
55676+ const struct vfsmount *mnt);
55677+__u32 gr_acl_handle_access(const struct dentry *dentry,
55678+ const struct vfsmount *mnt, const int fmode);
55679+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
55680+ const struct vfsmount *mnt, mode_t mode);
55681+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
55682+ const struct vfsmount *mnt, mode_t mode);
55683+__u32 gr_acl_handle_chown(const struct dentry *dentry,
55684+ const struct vfsmount *mnt);
55685+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
55686+ const struct vfsmount *mnt);
55687+int gr_handle_ptrace(struct task_struct *task, const long request);
55688+int gr_handle_proc_ptrace(struct task_struct *task);
55689+__u32 gr_acl_handle_execve(const struct dentry *dentry,
55690+ const struct vfsmount *mnt);
55691+int gr_check_crash_exec(const struct file *filp);
55692+int gr_acl_is_enabled(void);
55693+void gr_set_kernel_label(struct task_struct *task);
55694+void gr_set_role_label(struct task_struct *task, const uid_t uid,
55695+ const gid_t gid);
55696+int gr_set_proc_label(const struct dentry *dentry,
55697+ const struct vfsmount *mnt,
55698+ const int unsafe_share);
55699+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
55700+ const struct vfsmount *mnt);
55701+__u32 gr_acl_handle_open(const struct dentry *dentry,
55702+ const struct vfsmount *mnt, const int fmode);
55703+__u32 gr_acl_handle_creat(const struct dentry *dentry,
55704+ const struct dentry *p_dentry,
55705+ const struct vfsmount *p_mnt, const int fmode,
55706+ const int imode);
55707+void gr_handle_create(const struct dentry *dentry,
55708+ const struct vfsmount *mnt);
55709+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
55710+ const struct dentry *parent_dentry,
55711+ const struct vfsmount *parent_mnt,
55712+ const int mode);
55713+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
55714+ const struct dentry *parent_dentry,
55715+ const struct vfsmount *parent_mnt);
55716+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
55717+ const struct vfsmount *mnt);
55718+void gr_handle_delete(const ino_t ino, const dev_t dev);
55719+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
55720+ const struct vfsmount *mnt);
55721+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
55722+ const struct dentry *parent_dentry,
55723+ const struct vfsmount *parent_mnt,
55724+ const char *from);
55725+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
55726+ const struct dentry *parent_dentry,
55727+ const struct vfsmount *parent_mnt,
55728+ const struct dentry *old_dentry,
55729+ const struct vfsmount *old_mnt, const char *to);
55730+int gr_acl_handle_rename(struct dentry *new_dentry,
55731+ struct dentry *parent_dentry,
55732+ const struct vfsmount *parent_mnt,
55733+ struct dentry *old_dentry,
55734+ struct inode *old_parent_inode,
55735+ struct vfsmount *old_mnt, const char *newname);
55736+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
55737+ struct dentry *old_dentry,
55738+ struct dentry *new_dentry,
55739+ struct vfsmount *mnt, const __u8 replace);
55740+__u32 gr_check_link(const struct dentry *new_dentry,
55741+ const struct dentry *parent_dentry,
55742+ const struct vfsmount *parent_mnt,
55743+ const struct dentry *old_dentry,
55744+ const struct vfsmount *old_mnt);
55745+int gr_acl_handle_filldir(const struct file *file, const char *name,
55746+ const unsigned int namelen, const ino_t ino);
55747+
55748+__u32 gr_acl_handle_unix(const struct dentry *dentry,
55749+ const struct vfsmount *mnt);
55750+void gr_acl_handle_exit(void);
55751+void gr_acl_handle_psacct(struct task_struct *task, const long code);
55752+int gr_acl_handle_procpidmem(const struct task_struct *task);
55753+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
55754+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
55755+void gr_audit_ptrace(struct task_struct *task);
55756+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
55757+
55758+#ifdef CONFIG_GRKERNSEC
55759+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
55760+void gr_handle_vm86(void);
55761+void gr_handle_mem_readwrite(u64 from, u64 to);
55762+
55763+extern int grsec_enable_dmesg;
55764+extern int grsec_disable_privio;
55765+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55766+extern int grsec_enable_chroot_findtask;
55767+#endif
55768+#endif
55769+
55770+#endif
55771diff -urNp linux-3.0.4/include/linux/grsock.h linux-3.0.4/include/linux/grsock.h
55772--- linux-3.0.4/include/linux/grsock.h 1969-12-31 19:00:00.000000000 -0500
55773+++ linux-3.0.4/include/linux/grsock.h 2011-08-23 21:48:14.000000000 -0400
55774@@ -0,0 +1,19 @@
55775+#ifndef __GRSOCK_H
55776+#define __GRSOCK_H
55777+
55778+extern void gr_attach_curr_ip(const struct sock *sk);
55779+extern int gr_handle_sock_all(const int family, const int type,
55780+ const int protocol);
55781+extern int gr_handle_sock_server(const struct sockaddr *sck);
55782+extern int gr_handle_sock_server_other(const struct sock *sck);
55783+extern int gr_handle_sock_client(const struct sockaddr *sck);
55784+extern int gr_search_connect(struct socket * sock,
55785+ struct sockaddr_in * addr);
55786+extern int gr_search_bind(struct socket * sock,
55787+ struct sockaddr_in * addr);
55788+extern int gr_search_listen(struct socket * sock);
55789+extern int gr_search_accept(struct socket * sock);
55790+extern int gr_search_socket(const int domain, const int type,
55791+ const int protocol);
55792+
55793+#endif
55794diff -urNp linux-3.0.4/include/linux/hid.h linux-3.0.4/include/linux/hid.h
55795--- linux-3.0.4/include/linux/hid.h 2011-07-21 22:17:23.000000000 -0400
55796+++ linux-3.0.4/include/linux/hid.h 2011-08-23 21:47:56.000000000 -0400
55797@@ -675,7 +675,7 @@ struct hid_ll_driver {
55798 unsigned int code, int value);
55799
55800 int (*parse)(struct hid_device *hdev);
55801-};
55802+} __no_const;
55803
55804 #define PM_HINT_FULLON 1<<5
55805 #define PM_HINT_NORMAL 1<<1
55806diff -urNp linux-3.0.4/include/linux/highmem.h linux-3.0.4/include/linux/highmem.h
55807--- linux-3.0.4/include/linux/highmem.h 2011-07-21 22:17:23.000000000 -0400
55808+++ linux-3.0.4/include/linux/highmem.h 2011-08-23 21:47:56.000000000 -0400
55809@@ -185,6 +185,18 @@ static inline void clear_highpage(struct
55810 kunmap_atomic(kaddr, KM_USER0);
55811 }
55812
55813+static inline void sanitize_highpage(struct page *page)
55814+{
55815+ void *kaddr;
55816+ unsigned long flags;
55817+
55818+ local_irq_save(flags);
55819+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
55820+ clear_page(kaddr);
55821+ kunmap_atomic(kaddr, KM_CLEARPAGE);
55822+ local_irq_restore(flags);
55823+}
55824+
55825 static inline void zero_user_segments(struct page *page,
55826 unsigned start1, unsigned end1,
55827 unsigned start2, unsigned end2)
55828diff -urNp linux-3.0.4/include/linux/i2c.h linux-3.0.4/include/linux/i2c.h
55829--- linux-3.0.4/include/linux/i2c.h 2011-07-21 22:17:23.000000000 -0400
55830+++ linux-3.0.4/include/linux/i2c.h 2011-08-23 21:47:56.000000000 -0400
55831@@ -346,6 +346,7 @@ struct i2c_algorithm {
55832 /* To determine what the adapter supports */
55833 u32 (*functionality) (struct i2c_adapter *);
55834 };
55835+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
55836
55837 /*
55838 * i2c_adapter is the structure used to identify a physical i2c bus along
55839diff -urNp linux-3.0.4/include/linux/i2o.h linux-3.0.4/include/linux/i2o.h
55840--- linux-3.0.4/include/linux/i2o.h 2011-07-21 22:17:23.000000000 -0400
55841+++ linux-3.0.4/include/linux/i2o.h 2011-08-23 21:47:56.000000000 -0400
55842@@ -564,7 +564,7 @@ struct i2o_controller {
55843 struct i2o_device *exec; /* Executive */
55844 #if BITS_PER_LONG == 64
55845 spinlock_t context_list_lock; /* lock for context_list */
55846- atomic_t context_list_counter; /* needed for unique contexts */
55847+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
55848 struct list_head context_list; /* list of context id's
55849 and pointers */
55850 #endif
55851diff -urNp linux-3.0.4/include/linux/init.h linux-3.0.4/include/linux/init.h
55852--- linux-3.0.4/include/linux/init.h 2011-07-21 22:17:23.000000000 -0400
55853+++ linux-3.0.4/include/linux/init.h 2011-08-23 21:47:56.000000000 -0400
55854@@ -293,13 +293,13 @@ void __init parse_early_options(char *cm
55855
55856 /* Each module must use one module_init(). */
55857 #define module_init(initfn) \
55858- static inline initcall_t __inittest(void) \
55859+ static inline __used initcall_t __inittest(void) \
55860 { return initfn; } \
55861 int init_module(void) __attribute__((alias(#initfn)));
55862
55863 /* This is only required if you want to be unloadable. */
55864 #define module_exit(exitfn) \
55865- static inline exitcall_t __exittest(void) \
55866+ static inline __used exitcall_t __exittest(void) \
55867 { return exitfn; } \
55868 void cleanup_module(void) __attribute__((alias(#exitfn)));
55869
55870diff -urNp linux-3.0.4/include/linux/init_task.h linux-3.0.4/include/linux/init_task.h
55871--- linux-3.0.4/include/linux/init_task.h 2011-07-21 22:17:23.000000000 -0400
55872+++ linux-3.0.4/include/linux/init_task.h 2011-08-23 21:47:56.000000000 -0400
55873@@ -126,6 +126,12 @@ extern struct cred init_cred;
55874 # define INIT_PERF_EVENTS(tsk)
55875 #endif
55876
55877+#ifdef CONFIG_X86
55878+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
55879+#else
55880+#define INIT_TASK_THREAD_INFO
55881+#endif
55882+
55883 /*
55884 * INIT_TASK is used to set up the first task table, touch at
55885 * your own risk!. Base=0, limit=0x1fffff (=2MB)
55886@@ -164,6 +170,7 @@ extern struct cred init_cred;
55887 RCU_INIT_POINTER(.cred, &init_cred), \
55888 .comm = "swapper", \
55889 .thread = INIT_THREAD, \
55890+ INIT_TASK_THREAD_INFO \
55891 .fs = &init_fs, \
55892 .files = &init_files, \
55893 .signal = &init_signals, \
55894diff -urNp linux-3.0.4/include/linux/intel-iommu.h linux-3.0.4/include/linux/intel-iommu.h
55895--- linux-3.0.4/include/linux/intel-iommu.h 2011-07-21 22:17:23.000000000 -0400
55896+++ linux-3.0.4/include/linux/intel-iommu.h 2011-08-23 21:47:56.000000000 -0400
55897@@ -296,7 +296,7 @@ struct iommu_flush {
55898 u8 fm, u64 type);
55899 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
55900 unsigned int size_order, u64 type);
55901-};
55902+} __no_const;
55903
55904 enum {
55905 SR_DMAR_FECTL_REG,
55906diff -urNp linux-3.0.4/include/linux/interrupt.h linux-3.0.4/include/linux/interrupt.h
55907--- linux-3.0.4/include/linux/interrupt.h 2011-07-21 22:17:23.000000000 -0400
55908+++ linux-3.0.4/include/linux/interrupt.h 2011-08-23 21:47:56.000000000 -0400
55909@@ -422,7 +422,7 @@ enum
55910 /* map softirq index to softirq name. update 'softirq_to_name' in
55911 * kernel/softirq.c when adding a new softirq.
55912 */
55913-extern char *softirq_to_name[NR_SOFTIRQS];
55914+extern const char * const softirq_to_name[NR_SOFTIRQS];
55915
55916 /* softirq mask and active fields moved to irq_cpustat_t in
55917 * asm/hardirq.h to get better cache usage. KAO
55918@@ -430,12 +430,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
55919
55920 struct softirq_action
55921 {
55922- void (*action)(struct softirq_action *);
55923+ void (*action)(void);
55924 };
55925
55926 asmlinkage void do_softirq(void);
55927 asmlinkage void __do_softirq(void);
55928-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
55929+extern void open_softirq(int nr, void (*action)(void));
55930 extern void softirq_init(void);
55931 static inline void __raise_softirq_irqoff(unsigned int nr)
55932 {
55933diff -urNp linux-3.0.4/include/linux/kallsyms.h linux-3.0.4/include/linux/kallsyms.h
55934--- linux-3.0.4/include/linux/kallsyms.h 2011-07-21 22:17:23.000000000 -0400
55935+++ linux-3.0.4/include/linux/kallsyms.h 2011-08-23 21:48:14.000000000 -0400
55936@@ -15,7 +15,8 @@
55937
55938 struct module;
55939
55940-#ifdef CONFIG_KALLSYMS
55941+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
55942+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55943 /* Lookup the address for a symbol. Returns 0 if not found. */
55944 unsigned long kallsyms_lookup_name(const char *name);
55945
55946@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(un
55947 /* Stupid that this does nothing, but I didn't create this mess. */
55948 #define __print_symbol(fmt, addr)
55949 #endif /*CONFIG_KALLSYMS*/
55950+#else /* when included by kallsyms.c, vsnprintf.c, or
55951+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
55952+extern void __print_symbol(const char *fmt, unsigned long address);
55953+extern int sprint_backtrace(char *buffer, unsigned long address);
55954+extern int sprint_symbol(char *buffer, unsigned long address);
55955+const char *kallsyms_lookup(unsigned long addr,
55956+ unsigned long *symbolsize,
55957+ unsigned long *offset,
55958+ char **modname, char *namebuf);
55959+#endif
55960
55961 /* This macro allows us to keep printk typechecking */
55962 static void __check_printsym_format(const char *fmt, ...)
55963diff -urNp linux-3.0.4/include/linux/kgdb.h linux-3.0.4/include/linux/kgdb.h
55964--- linux-3.0.4/include/linux/kgdb.h 2011-07-21 22:17:23.000000000 -0400
55965+++ linux-3.0.4/include/linux/kgdb.h 2011-08-26 19:49:56.000000000 -0400
55966@@ -53,7 +53,7 @@ extern int kgdb_connected;
55967 extern int kgdb_io_module_registered;
55968
55969 extern atomic_t kgdb_setting_breakpoint;
55970-extern atomic_t kgdb_cpu_doing_single_step;
55971+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
55972
55973 extern struct task_struct *kgdb_usethread;
55974 extern struct task_struct *kgdb_contthread;
55975@@ -251,7 +251,7 @@ struct kgdb_arch {
55976 void (*disable_hw_break)(struct pt_regs *regs);
55977 void (*remove_all_hw_break)(void);
55978 void (*correct_hw_break)(void);
55979-};
55980+} __do_const;
55981
55982 /**
55983 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
55984@@ -276,7 +276,7 @@ struct kgdb_io {
55985 void (*pre_exception) (void);
55986 void (*post_exception) (void);
55987 int is_console;
55988-};
55989+} __do_const;
55990
55991 extern struct kgdb_arch arch_kgdb_ops;
55992
55993diff -urNp linux-3.0.4/include/linux/kmod.h linux-3.0.4/include/linux/kmod.h
55994--- linux-3.0.4/include/linux/kmod.h 2011-07-21 22:17:23.000000000 -0400
55995+++ linux-3.0.4/include/linux/kmod.h 2011-08-23 21:48:14.000000000 -0400
55996@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysc
55997 * usually useless though. */
55998 extern int __request_module(bool wait, const char *name, ...) \
55999 __attribute__((format(printf, 2, 3)));
56000+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
56001+ __attribute__((format(printf, 3, 4)));
56002 #define request_module(mod...) __request_module(true, mod)
56003 #define request_module_nowait(mod...) __request_module(false, mod)
56004 #define try_then_request_module(x, mod...) \
56005diff -urNp linux-3.0.4/include/linux/kvm_host.h linux-3.0.4/include/linux/kvm_host.h
56006--- linux-3.0.4/include/linux/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
56007+++ linux-3.0.4/include/linux/kvm_host.h 2011-08-23 21:47:56.000000000 -0400
56008@@ -307,7 +307,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
56009 void vcpu_load(struct kvm_vcpu *vcpu);
56010 void vcpu_put(struct kvm_vcpu *vcpu);
56011
56012-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
56013+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
56014 struct module *module);
56015 void kvm_exit(void);
56016
56017@@ -446,7 +446,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
56018 struct kvm_guest_debug *dbg);
56019 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
56020
56021-int kvm_arch_init(void *opaque);
56022+int kvm_arch_init(const void *opaque);
56023 void kvm_arch_exit(void);
56024
56025 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
56026diff -urNp linux-3.0.4/include/linux/libata.h linux-3.0.4/include/linux/libata.h
56027--- linux-3.0.4/include/linux/libata.h 2011-07-21 22:17:23.000000000 -0400
56028+++ linux-3.0.4/include/linux/libata.h 2011-08-26 19:49:56.000000000 -0400
56029@@ -899,7 +899,7 @@ struct ata_port_operations {
56030 * fields must be pointers.
56031 */
56032 const struct ata_port_operations *inherits;
56033-};
56034+} __do_const;
56035
56036 struct ata_port_info {
56037 unsigned long flags;
56038diff -urNp linux-3.0.4/include/linux/mca.h linux-3.0.4/include/linux/mca.h
56039--- linux-3.0.4/include/linux/mca.h 2011-07-21 22:17:23.000000000 -0400
56040+++ linux-3.0.4/include/linux/mca.h 2011-08-23 21:47:56.000000000 -0400
56041@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
56042 int region);
56043 void * (*mca_transform_memory)(struct mca_device *,
56044 void *memory);
56045-};
56046+} __no_const;
56047
56048 struct mca_bus {
56049 u64 default_dma_mask;
56050diff -urNp linux-3.0.4/include/linux/memory.h linux-3.0.4/include/linux/memory.h
56051--- linux-3.0.4/include/linux/memory.h 2011-07-21 22:17:23.000000000 -0400
56052+++ linux-3.0.4/include/linux/memory.h 2011-08-23 21:47:56.000000000 -0400
56053@@ -144,7 +144,7 @@ struct memory_accessor {
56054 size_t count);
56055 ssize_t (*write)(struct memory_accessor *, const char *buf,
56056 off_t offset, size_t count);
56057-};
56058+} __no_const;
56059
56060 /*
56061 * Kernel text modification mutex, used for code patching. Users of this lock
56062diff -urNp linux-3.0.4/include/linux/mfd/abx500.h linux-3.0.4/include/linux/mfd/abx500.h
56063--- linux-3.0.4/include/linux/mfd/abx500.h 2011-07-21 22:17:23.000000000 -0400
56064+++ linux-3.0.4/include/linux/mfd/abx500.h 2011-08-23 21:47:56.000000000 -0400
56065@@ -234,6 +234,7 @@ struct abx500_ops {
56066 int (*event_registers_startup_state_get) (struct device *, u8 *);
56067 int (*startup_irq_enabled) (struct device *, unsigned int);
56068 };
56069+typedef struct abx500_ops __no_const abx500_ops_no_const;
56070
56071 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
56072 void abx500_remove_ops(struct device *dev);
56073diff -urNp linux-3.0.4/include/linux/mm.h linux-3.0.4/include/linux/mm.h
56074--- linux-3.0.4/include/linux/mm.h 2011-08-23 21:44:40.000000000 -0400
56075+++ linux-3.0.4/include/linux/mm.h 2011-08-23 21:47:56.000000000 -0400
56076@@ -113,7 +113,14 @@ extern unsigned int kobjsize(const void
56077
56078 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
56079 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
56080+
56081+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
56082+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
56083+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
56084+#else
56085 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
56086+#endif
56087+
56088 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
56089 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
56090
56091@@ -1009,34 +1016,6 @@ int set_page_dirty(struct page *page);
56092 int set_page_dirty_lock(struct page *page);
56093 int clear_page_dirty_for_io(struct page *page);
56094
56095-/* Is the vma a continuation of the stack vma above it? */
56096-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
56097-{
56098- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
56099-}
56100-
56101-static inline int stack_guard_page_start(struct vm_area_struct *vma,
56102- unsigned long addr)
56103-{
56104- return (vma->vm_flags & VM_GROWSDOWN) &&
56105- (vma->vm_start == addr) &&
56106- !vma_growsdown(vma->vm_prev, addr);
56107-}
56108-
56109-/* Is the vma a continuation of the stack vma below it? */
56110-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
56111-{
56112- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
56113-}
56114-
56115-static inline int stack_guard_page_end(struct vm_area_struct *vma,
56116- unsigned long addr)
56117-{
56118- return (vma->vm_flags & VM_GROWSUP) &&
56119- (vma->vm_end == addr) &&
56120- !vma_growsup(vma->vm_next, addr);
56121-}
56122-
56123 extern unsigned long move_page_tables(struct vm_area_struct *vma,
56124 unsigned long old_addr, struct vm_area_struct *new_vma,
56125 unsigned long new_addr, unsigned long len);
56126@@ -1169,6 +1148,15 @@ struct shrinker {
56127 extern void register_shrinker(struct shrinker *);
56128 extern void unregister_shrinker(struct shrinker *);
56129
56130+#ifdef CONFIG_MMU
56131+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
56132+#else
56133+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
56134+{
56135+ return __pgprot(0);
56136+}
56137+#endif
56138+
56139 int vma_wants_writenotify(struct vm_area_struct *vma);
56140
56141 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
56142@@ -1452,6 +1440,7 @@ out:
56143 }
56144
56145 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
56146+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
56147
56148 extern unsigned long do_brk(unsigned long, unsigned long);
56149
56150@@ -1510,6 +1499,10 @@ extern struct vm_area_struct * find_vma(
56151 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
56152 struct vm_area_struct **pprev);
56153
56154+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
56155+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
56156+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
56157+
56158 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
56159 NULL if none. Assume start_addr < end_addr. */
56160 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
56161@@ -1526,15 +1519,6 @@ static inline unsigned long vma_pages(st
56162 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
56163 }
56164
56165-#ifdef CONFIG_MMU
56166-pgprot_t vm_get_page_prot(unsigned long vm_flags);
56167-#else
56168-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
56169-{
56170- return __pgprot(0);
56171-}
56172-#endif
56173-
56174 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
56175 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
56176 unsigned long pfn, unsigned long size, pgprot_t);
56177@@ -1647,7 +1631,7 @@ extern int unpoison_memory(unsigned long
56178 extern int sysctl_memory_failure_early_kill;
56179 extern int sysctl_memory_failure_recovery;
56180 extern void shake_page(struct page *p, int access);
56181-extern atomic_long_t mce_bad_pages;
56182+extern atomic_long_unchecked_t mce_bad_pages;
56183 extern int soft_offline_page(struct page *page, int flags);
56184
56185 extern void dump_page(struct page *page);
56186@@ -1661,5 +1645,11 @@ extern void copy_user_huge_page(struct p
56187 unsigned int pages_per_huge_page);
56188 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
56189
56190+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
56191+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
56192+#else
56193+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
56194+#endif
56195+
56196 #endif /* __KERNEL__ */
56197 #endif /* _LINUX_MM_H */
56198diff -urNp linux-3.0.4/include/linux/mm_types.h linux-3.0.4/include/linux/mm_types.h
56199--- linux-3.0.4/include/linux/mm_types.h 2011-07-21 22:17:23.000000000 -0400
56200+++ linux-3.0.4/include/linux/mm_types.h 2011-08-23 21:47:56.000000000 -0400
56201@@ -184,6 +184,8 @@ struct vm_area_struct {
56202 #ifdef CONFIG_NUMA
56203 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
56204 #endif
56205+
56206+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
56207 };
56208
56209 struct core_thread {
56210@@ -316,6 +318,24 @@ struct mm_struct {
56211 #ifdef CONFIG_CPUMASK_OFFSTACK
56212 struct cpumask cpumask_allocation;
56213 #endif
56214+
56215+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
56216+ unsigned long pax_flags;
56217+#endif
56218+
56219+#ifdef CONFIG_PAX_DLRESOLVE
56220+ unsigned long call_dl_resolve;
56221+#endif
56222+
56223+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
56224+ unsigned long call_syscall;
56225+#endif
56226+
56227+#ifdef CONFIG_PAX_ASLR
56228+ unsigned long delta_mmap; /* randomized offset */
56229+ unsigned long delta_stack; /* randomized offset */
56230+#endif
56231+
56232 };
56233
56234 static inline void mm_init_cpumask(struct mm_struct *mm)
56235diff -urNp linux-3.0.4/include/linux/mmu_notifier.h linux-3.0.4/include/linux/mmu_notifier.h
56236--- linux-3.0.4/include/linux/mmu_notifier.h 2011-07-21 22:17:23.000000000 -0400
56237+++ linux-3.0.4/include/linux/mmu_notifier.h 2011-08-23 21:47:56.000000000 -0400
56238@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destr
56239 */
56240 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
56241 ({ \
56242- pte_t __pte; \
56243+ pte_t ___pte; \
56244 struct vm_area_struct *___vma = __vma; \
56245 unsigned long ___address = __address; \
56246- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
56247+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
56248 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
56249- __pte; \
56250+ ___pte; \
56251 })
56252
56253 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
56254diff -urNp linux-3.0.4/include/linux/mmzone.h linux-3.0.4/include/linux/mmzone.h
56255--- linux-3.0.4/include/linux/mmzone.h 2011-07-21 22:17:23.000000000 -0400
56256+++ linux-3.0.4/include/linux/mmzone.h 2011-08-23 21:47:56.000000000 -0400
56257@@ -350,7 +350,7 @@ struct zone {
56258 unsigned long flags; /* zone flags, see below */
56259
56260 /* Zone statistics */
56261- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
56262+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
56263
56264 /*
56265 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
56266diff -urNp linux-3.0.4/include/linux/mod_devicetable.h linux-3.0.4/include/linux/mod_devicetable.h
56267--- linux-3.0.4/include/linux/mod_devicetable.h 2011-07-21 22:17:23.000000000 -0400
56268+++ linux-3.0.4/include/linux/mod_devicetable.h 2011-08-23 21:47:56.000000000 -0400
56269@@ -12,7 +12,7 @@
56270 typedef unsigned long kernel_ulong_t;
56271 #endif
56272
56273-#define PCI_ANY_ID (~0)
56274+#define PCI_ANY_ID ((__u16)~0)
56275
56276 struct pci_device_id {
56277 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
56278@@ -131,7 +131,7 @@ struct usb_device_id {
56279 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
56280 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
56281
56282-#define HID_ANY_ID (~0)
56283+#define HID_ANY_ID (~0U)
56284
56285 struct hid_device_id {
56286 __u16 bus;
56287diff -urNp linux-3.0.4/include/linux/module.h linux-3.0.4/include/linux/module.h
56288--- linux-3.0.4/include/linux/module.h 2011-07-21 22:17:23.000000000 -0400
56289+++ linux-3.0.4/include/linux/module.h 2011-08-23 21:47:56.000000000 -0400
56290@@ -16,6 +16,7 @@
56291 #include <linux/kobject.h>
56292 #include <linux/moduleparam.h>
56293 #include <linux/tracepoint.h>
56294+#include <linux/fs.h>
56295
56296 #include <linux/percpu.h>
56297 #include <asm/module.h>
56298@@ -325,19 +326,16 @@ struct module
56299 int (*init)(void);
56300
56301 /* If this is non-NULL, vfree after init() returns */
56302- void *module_init;
56303+ void *module_init_rx, *module_init_rw;
56304
56305 /* Here is the actual code + data, vfree'd on unload. */
56306- void *module_core;
56307+ void *module_core_rx, *module_core_rw;
56308
56309 /* Here are the sizes of the init and core sections */
56310- unsigned int init_size, core_size;
56311+ unsigned int init_size_rw, core_size_rw;
56312
56313 /* The size of the executable code in each section. */
56314- unsigned int init_text_size, core_text_size;
56315-
56316- /* Size of RO sections of the module (text+rodata) */
56317- unsigned int init_ro_size, core_ro_size;
56318+ unsigned int init_size_rx, core_size_rx;
56319
56320 /* Arch-specific module values */
56321 struct mod_arch_specific arch;
56322@@ -393,6 +391,10 @@ struct module
56323 #ifdef CONFIG_EVENT_TRACING
56324 struct ftrace_event_call **trace_events;
56325 unsigned int num_trace_events;
56326+ struct file_operations trace_id;
56327+ struct file_operations trace_enable;
56328+ struct file_operations trace_format;
56329+ struct file_operations trace_filter;
56330 #endif
56331 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
56332 unsigned int num_ftrace_callsites;
56333@@ -443,16 +445,46 @@ bool is_module_address(unsigned long add
56334 bool is_module_percpu_address(unsigned long addr);
56335 bool is_module_text_address(unsigned long addr);
56336
56337+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
56338+{
56339+
56340+#ifdef CONFIG_PAX_KERNEXEC
56341+ if (ktla_ktva(addr) >= (unsigned long)start &&
56342+ ktla_ktva(addr) < (unsigned long)start + size)
56343+ return 1;
56344+#endif
56345+
56346+ return ((void *)addr >= start && (void *)addr < start + size);
56347+}
56348+
56349+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
56350+{
56351+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
56352+}
56353+
56354+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
56355+{
56356+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
56357+}
56358+
56359+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
56360+{
56361+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
56362+}
56363+
56364+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
56365+{
56366+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
56367+}
56368+
56369 static inline int within_module_core(unsigned long addr, struct module *mod)
56370 {
56371- return (unsigned long)mod->module_core <= addr &&
56372- addr < (unsigned long)mod->module_core + mod->core_size;
56373+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
56374 }
56375
56376 static inline int within_module_init(unsigned long addr, struct module *mod)
56377 {
56378- return (unsigned long)mod->module_init <= addr &&
56379- addr < (unsigned long)mod->module_init + mod->init_size;
56380+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
56381 }
56382
56383 /* Search for module by name: must hold module_mutex. */
56384diff -urNp linux-3.0.4/include/linux/moduleloader.h linux-3.0.4/include/linux/moduleloader.h
56385--- linux-3.0.4/include/linux/moduleloader.h 2011-07-21 22:17:23.000000000 -0400
56386+++ linux-3.0.4/include/linux/moduleloader.h 2011-08-23 21:47:56.000000000 -0400
56387@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
56388 sections. Returns NULL on failure. */
56389 void *module_alloc(unsigned long size);
56390
56391+#ifdef CONFIG_PAX_KERNEXEC
56392+void *module_alloc_exec(unsigned long size);
56393+#else
56394+#define module_alloc_exec(x) module_alloc(x)
56395+#endif
56396+
56397 /* Free memory returned from module_alloc. */
56398 void module_free(struct module *mod, void *module_region);
56399
56400+#ifdef CONFIG_PAX_KERNEXEC
56401+void module_free_exec(struct module *mod, void *module_region);
56402+#else
56403+#define module_free_exec(x, y) module_free((x), (y))
56404+#endif
56405+
56406 /* Apply the given relocation to the (simplified) ELF. Return -error
56407 or 0. */
56408 int apply_relocate(Elf_Shdr *sechdrs,
56409diff -urNp linux-3.0.4/include/linux/moduleparam.h linux-3.0.4/include/linux/moduleparam.h
56410--- linux-3.0.4/include/linux/moduleparam.h 2011-07-21 22:17:23.000000000 -0400
56411+++ linux-3.0.4/include/linux/moduleparam.h 2011-08-23 21:47:56.000000000 -0400
56412@@ -255,7 +255,7 @@ static inline void __kernel_param_unlock
56413 * @len is usually just sizeof(string).
56414 */
56415 #define module_param_string(name, string, len, perm) \
56416- static const struct kparam_string __param_string_##name \
56417+ static const struct kparam_string __param_string_##name __used \
56418 = { len, string }; \
56419 __module_param_call(MODULE_PARAM_PREFIX, name, \
56420 &param_ops_string, \
56421@@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffe
56422 * module_param_named() for why this might be necessary.
56423 */
56424 #define module_param_array_named(name, array, type, nump, perm) \
56425- static const struct kparam_array __param_arr_##name \
56426+ static const struct kparam_array __param_arr_##name __used \
56427 = { .max = ARRAY_SIZE(array), .num = nump, \
56428 .ops = &param_ops_##type, \
56429 .elemsize = sizeof(array[0]), .elem = array }; \
56430diff -urNp linux-3.0.4/include/linux/namei.h linux-3.0.4/include/linux/namei.h
56431--- linux-3.0.4/include/linux/namei.h 2011-07-21 22:17:23.000000000 -0400
56432+++ linux-3.0.4/include/linux/namei.h 2011-08-23 21:47:56.000000000 -0400
56433@@ -24,7 +24,7 @@ struct nameidata {
56434 unsigned seq;
56435 int last_type;
56436 unsigned depth;
56437- char *saved_names[MAX_NESTED_LINKS + 1];
56438+ const char *saved_names[MAX_NESTED_LINKS + 1];
56439
56440 /* Intent data */
56441 union {
56442@@ -91,12 +91,12 @@ extern int follow_up(struct path *);
56443 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
56444 extern void unlock_rename(struct dentry *, struct dentry *);
56445
56446-static inline void nd_set_link(struct nameidata *nd, char *path)
56447+static inline void nd_set_link(struct nameidata *nd, const char *path)
56448 {
56449 nd->saved_names[nd->depth] = path;
56450 }
56451
56452-static inline char *nd_get_link(struct nameidata *nd)
56453+static inline const char *nd_get_link(const struct nameidata *nd)
56454 {
56455 return nd->saved_names[nd->depth];
56456 }
56457diff -urNp linux-3.0.4/include/linux/netdevice.h linux-3.0.4/include/linux/netdevice.h
56458--- linux-3.0.4/include/linux/netdevice.h 2011-08-23 21:44:40.000000000 -0400
56459+++ linux-3.0.4/include/linux/netdevice.h 2011-08-23 21:47:56.000000000 -0400
56460@@ -979,6 +979,7 @@ struct net_device_ops {
56461 int (*ndo_set_features)(struct net_device *dev,
56462 u32 features);
56463 };
56464+typedef struct net_device_ops __no_const net_device_ops_no_const;
56465
56466 /*
56467 * The DEVICE structure.
56468diff -urNp linux-3.0.4/include/linux/netfilter/xt_gradm.h linux-3.0.4/include/linux/netfilter/xt_gradm.h
56469--- linux-3.0.4/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
56470+++ linux-3.0.4/include/linux/netfilter/xt_gradm.h 2011-08-23 21:48:14.000000000 -0400
56471@@ -0,0 +1,9 @@
56472+#ifndef _LINUX_NETFILTER_XT_GRADM_H
56473+#define _LINUX_NETFILTER_XT_GRADM_H 1
56474+
56475+struct xt_gradm_mtinfo {
56476+ __u16 flags;
56477+ __u16 invflags;
56478+};
56479+
56480+#endif
56481diff -urNp linux-3.0.4/include/linux/of_pdt.h linux-3.0.4/include/linux/of_pdt.h
56482--- linux-3.0.4/include/linux/of_pdt.h 2011-07-21 22:17:23.000000000 -0400
56483+++ linux-3.0.4/include/linux/of_pdt.h 2011-08-30 06:20:11.000000000 -0400
56484@@ -32,7 +32,7 @@ struct of_pdt_ops {
56485
56486 /* return 0 on success; fill in 'len' with number of bytes in path */
56487 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
56488-};
56489+} __no_const;
56490
56491 extern void *prom_early_alloc(unsigned long size);
56492
56493diff -urNp linux-3.0.4/include/linux/oprofile.h linux-3.0.4/include/linux/oprofile.h
56494--- linux-3.0.4/include/linux/oprofile.h 2011-07-21 22:17:23.000000000 -0400
56495+++ linux-3.0.4/include/linux/oprofile.h 2011-08-23 21:47:56.000000000 -0400
56496@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super
56497 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
56498 char const * name, ulong * val);
56499
56500-/** Create a file for read-only access to an atomic_t. */
56501+/** Create a file for read-only access to an atomic_unchecked_t. */
56502 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
56503- char const * name, atomic_t * val);
56504+ char const * name, atomic_unchecked_t * val);
56505
56506 /** create a directory */
56507 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
56508diff -urNp linux-3.0.4/include/linux/padata.h linux-3.0.4/include/linux/padata.h
56509--- linux-3.0.4/include/linux/padata.h 2011-07-21 22:17:23.000000000 -0400
56510+++ linux-3.0.4/include/linux/padata.h 2011-08-23 21:47:56.000000000 -0400
56511@@ -129,7 +129,7 @@ struct parallel_data {
56512 struct padata_instance *pinst;
56513 struct padata_parallel_queue __percpu *pqueue;
56514 struct padata_serial_queue __percpu *squeue;
56515- atomic_t seq_nr;
56516+ atomic_unchecked_t seq_nr;
56517 atomic_t reorder_objects;
56518 atomic_t refcnt;
56519 unsigned int max_seq_nr;
56520diff -urNp linux-3.0.4/include/linux/perf_event.h linux-3.0.4/include/linux/perf_event.h
56521--- linux-3.0.4/include/linux/perf_event.h 2011-07-21 22:17:23.000000000 -0400
56522+++ linux-3.0.4/include/linux/perf_event.h 2011-08-23 21:47:56.000000000 -0400
56523@@ -761,8 +761,8 @@ struct perf_event {
56524
56525 enum perf_event_active_state state;
56526 unsigned int attach_state;
56527- local64_t count;
56528- atomic64_t child_count;
56529+ local64_t count; /* PaX: fix it one day */
56530+ atomic64_unchecked_t child_count;
56531
56532 /*
56533 * These are the total time in nanoseconds that the event
56534@@ -813,8 +813,8 @@ struct perf_event {
56535 * These accumulate total time (in nanoseconds) that children
56536 * events have been enabled and running, respectively.
56537 */
56538- atomic64_t child_total_time_enabled;
56539- atomic64_t child_total_time_running;
56540+ atomic64_unchecked_t child_total_time_enabled;
56541+ atomic64_unchecked_t child_total_time_running;
56542
56543 /*
56544 * Protect attach/detach and child_list:
56545diff -urNp linux-3.0.4/include/linux/pipe_fs_i.h linux-3.0.4/include/linux/pipe_fs_i.h
56546--- linux-3.0.4/include/linux/pipe_fs_i.h 2011-07-21 22:17:23.000000000 -0400
56547+++ linux-3.0.4/include/linux/pipe_fs_i.h 2011-08-23 21:47:56.000000000 -0400
56548@@ -46,9 +46,9 @@ struct pipe_buffer {
56549 struct pipe_inode_info {
56550 wait_queue_head_t wait;
56551 unsigned int nrbufs, curbuf, buffers;
56552- unsigned int readers;
56553- unsigned int writers;
56554- unsigned int waiting_writers;
56555+ atomic_t readers;
56556+ atomic_t writers;
56557+ atomic_t waiting_writers;
56558 unsigned int r_counter;
56559 unsigned int w_counter;
56560 struct page *tmp_page;
56561diff -urNp linux-3.0.4/include/linux/pm_runtime.h linux-3.0.4/include/linux/pm_runtime.h
56562--- linux-3.0.4/include/linux/pm_runtime.h 2011-07-21 22:17:23.000000000 -0400
56563+++ linux-3.0.4/include/linux/pm_runtime.h 2011-08-23 21:47:56.000000000 -0400
56564@@ -94,7 +94,7 @@ static inline bool pm_runtime_callbacks_
56565
56566 static inline void pm_runtime_mark_last_busy(struct device *dev)
56567 {
56568- ACCESS_ONCE(dev->power.last_busy) = jiffies;
56569+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
56570 }
56571
56572 #else /* !CONFIG_PM_RUNTIME */
56573diff -urNp linux-3.0.4/include/linux/poison.h linux-3.0.4/include/linux/poison.h
56574--- linux-3.0.4/include/linux/poison.h 2011-07-21 22:17:23.000000000 -0400
56575+++ linux-3.0.4/include/linux/poison.h 2011-08-23 21:47:56.000000000 -0400
56576@@ -19,8 +19,8 @@
56577 * under normal circumstances, used to verify that nobody uses
56578 * non-initialized list entries.
56579 */
56580-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
56581-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
56582+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
56583+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
56584
56585 /********** include/linux/timer.h **********/
56586 /*
56587diff -urNp linux-3.0.4/include/linux/preempt.h linux-3.0.4/include/linux/preempt.h
56588--- linux-3.0.4/include/linux/preempt.h 2011-07-21 22:17:23.000000000 -0400
56589+++ linux-3.0.4/include/linux/preempt.h 2011-08-23 21:47:56.000000000 -0400
56590@@ -115,7 +115,7 @@ struct preempt_ops {
56591 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
56592 void (*sched_out)(struct preempt_notifier *notifier,
56593 struct task_struct *next);
56594-};
56595+} __no_const;
56596
56597 /**
56598 * preempt_notifier - key for installing preemption notifiers
56599diff -urNp linux-3.0.4/include/linux/proc_fs.h linux-3.0.4/include/linux/proc_fs.h
56600--- linux-3.0.4/include/linux/proc_fs.h 2011-07-21 22:17:23.000000000 -0400
56601+++ linux-3.0.4/include/linux/proc_fs.h 2011-08-23 21:48:14.000000000 -0400
56602@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
56603 return proc_create_data(name, mode, parent, proc_fops, NULL);
56604 }
56605
56606+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
56607+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
56608+{
56609+#ifdef CONFIG_GRKERNSEC_PROC_USER
56610+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
56611+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56612+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
56613+#else
56614+ return proc_create_data(name, mode, parent, proc_fops, NULL);
56615+#endif
56616+}
56617+
56618+
56619 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
56620 mode_t mode, struct proc_dir_entry *base,
56621 read_proc_t *read_proc, void * data)
56622@@ -258,7 +271,7 @@ union proc_op {
56623 int (*proc_show)(struct seq_file *m,
56624 struct pid_namespace *ns, struct pid *pid,
56625 struct task_struct *task);
56626-};
56627+} __no_const;
56628
56629 struct ctl_table_header;
56630 struct ctl_table;
56631diff -urNp linux-3.0.4/include/linux/ptrace.h linux-3.0.4/include/linux/ptrace.h
56632--- linux-3.0.4/include/linux/ptrace.h 2011-07-21 22:17:23.000000000 -0400
56633+++ linux-3.0.4/include/linux/ptrace.h 2011-08-23 21:48:14.000000000 -0400
56634@@ -115,10 +115,10 @@ extern void __ptrace_unlink(struct task_
56635 extern void exit_ptrace(struct task_struct *tracer);
56636 #define PTRACE_MODE_READ 1
56637 #define PTRACE_MODE_ATTACH 2
56638-/* Returns 0 on success, -errno on denial. */
56639-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
56640 /* Returns true on success, false on denial. */
56641 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
56642+/* Returns true on success, false on denial. */
56643+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
56644
56645 static inline int ptrace_reparented(struct task_struct *child)
56646 {
56647diff -urNp linux-3.0.4/include/linux/random.h linux-3.0.4/include/linux/random.h
56648--- linux-3.0.4/include/linux/random.h 2011-08-23 21:44:40.000000000 -0400
56649+++ linux-3.0.4/include/linux/random.h 2011-08-23 21:47:56.000000000 -0400
56650@@ -69,12 +69,17 @@ void srandom32(u32 seed);
56651
56652 u32 prandom32(struct rnd_state *);
56653
56654+static inline unsigned long pax_get_random_long(void)
56655+{
56656+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
56657+}
56658+
56659 /*
56660 * Handle minimum values for seeds
56661 */
56662 static inline u32 __seed(u32 x, u32 m)
56663 {
56664- return (x < m) ? x + m : x;
56665+ return (x <= m) ? x + m + 1 : x;
56666 }
56667
56668 /**
56669diff -urNp linux-3.0.4/include/linux/reboot.h linux-3.0.4/include/linux/reboot.h
56670--- linux-3.0.4/include/linux/reboot.h 2011-07-21 22:17:23.000000000 -0400
56671+++ linux-3.0.4/include/linux/reboot.h 2011-08-23 21:47:56.000000000 -0400
56672@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
56673 * Architecture-specific implementations of sys_reboot commands.
56674 */
56675
56676-extern void machine_restart(char *cmd);
56677-extern void machine_halt(void);
56678-extern void machine_power_off(void);
56679+extern void machine_restart(char *cmd) __noreturn;
56680+extern void machine_halt(void) __noreturn;
56681+extern void machine_power_off(void) __noreturn;
56682
56683 extern void machine_shutdown(void);
56684 struct pt_regs;
56685@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
56686 */
56687
56688 extern void kernel_restart_prepare(char *cmd);
56689-extern void kernel_restart(char *cmd);
56690-extern void kernel_halt(void);
56691-extern void kernel_power_off(void);
56692+extern void kernel_restart(char *cmd) __noreturn;
56693+extern void kernel_halt(void) __noreturn;
56694+extern void kernel_power_off(void) __noreturn;
56695
56696 extern int C_A_D; /* for sysctl */
56697 void ctrl_alt_del(void);
56698@@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
56699 * Emergency restart, callable from an interrupt handler.
56700 */
56701
56702-extern void emergency_restart(void);
56703+extern void emergency_restart(void) __noreturn;
56704 #include <asm/emergency-restart.h>
56705
56706 #endif
56707diff -urNp linux-3.0.4/include/linux/reiserfs_fs.h linux-3.0.4/include/linux/reiserfs_fs.h
56708--- linux-3.0.4/include/linux/reiserfs_fs.h 2011-07-21 22:17:23.000000000 -0400
56709+++ linux-3.0.4/include/linux/reiserfs_fs.h 2011-08-23 21:47:56.000000000 -0400
56710@@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset
56711 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
56712
56713 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
56714-#define get_generation(s) atomic_read (&fs_generation(s))
56715+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
56716 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
56717 #define __fs_changed(gen,s) (gen != get_generation (s))
56718 #define fs_changed(gen,s) \
56719diff -urNp linux-3.0.4/include/linux/reiserfs_fs_sb.h linux-3.0.4/include/linux/reiserfs_fs_sb.h
56720--- linux-3.0.4/include/linux/reiserfs_fs_sb.h 2011-07-21 22:17:23.000000000 -0400
56721+++ linux-3.0.4/include/linux/reiserfs_fs_sb.h 2011-08-23 21:47:56.000000000 -0400
56722@@ -386,7 +386,7 @@ struct reiserfs_sb_info {
56723 /* Comment? -Hans */
56724 wait_queue_head_t s_wait;
56725 /* To be obsoleted soon by per buffer seals.. -Hans */
56726- atomic_t s_generation_counter; // increased by one every time the
56727+ atomic_unchecked_t s_generation_counter; // increased by one every time the
56728 // tree gets re-balanced
56729 unsigned long s_properties; /* File system properties. Currently holds
56730 on-disk FS format */
56731diff -urNp linux-3.0.4/include/linux/relay.h linux-3.0.4/include/linux/relay.h
56732--- linux-3.0.4/include/linux/relay.h 2011-07-21 22:17:23.000000000 -0400
56733+++ linux-3.0.4/include/linux/relay.h 2011-08-23 21:47:56.000000000 -0400
56734@@ -159,7 +159,7 @@ struct rchan_callbacks
56735 * The callback should return 0 if successful, negative if not.
56736 */
56737 int (*remove_buf_file)(struct dentry *dentry);
56738-};
56739+} __no_const;
56740
56741 /*
56742 * CONFIG_RELAY kernel API, kernel/relay.c
56743diff -urNp linux-3.0.4/include/linux/rfkill.h linux-3.0.4/include/linux/rfkill.h
56744--- linux-3.0.4/include/linux/rfkill.h 2011-07-21 22:17:23.000000000 -0400
56745+++ linux-3.0.4/include/linux/rfkill.h 2011-08-23 21:47:56.000000000 -0400
56746@@ -147,6 +147,7 @@ struct rfkill_ops {
56747 void (*query)(struct rfkill *rfkill, void *data);
56748 int (*set_block)(void *data, bool blocked);
56749 };
56750+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
56751
56752 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
56753 /**
56754diff -urNp linux-3.0.4/include/linux/rmap.h linux-3.0.4/include/linux/rmap.h
56755--- linux-3.0.4/include/linux/rmap.h 2011-07-21 22:17:23.000000000 -0400
56756+++ linux-3.0.4/include/linux/rmap.h 2011-08-23 21:47:56.000000000 -0400
56757@@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struc
56758 void anon_vma_init(void); /* create anon_vma_cachep */
56759 int anon_vma_prepare(struct vm_area_struct *);
56760 void unlink_anon_vmas(struct vm_area_struct *);
56761-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
56762-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
56763+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
56764+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
56765 void __anon_vma_link(struct vm_area_struct *);
56766
56767 static inline void anon_vma_merge(struct vm_area_struct *vma,
56768diff -urNp linux-3.0.4/include/linux/sched.h linux-3.0.4/include/linux/sched.h
56769--- linux-3.0.4/include/linux/sched.h 2011-07-21 22:17:23.000000000 -0400
56770+++ linux-3.0.4/include/linux/sched.h 2011-08-25 17:22:27.000000000 -0400
56771@@ -100,6 +100,7 @@ struct bio_list;
56772 struct fs_struct;
56773 struct perf_event_context;
56774 struct blk_plug;
56775+struct linux_binprm;
56776
56777 /*
56778 * List of flags we want to share for kernel threads,
56779@@ -380,10 +381,13 @@ struct user_namespace;
56780 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
56781
56782 extern int sysctl_max_map_count;
56783+extern unsigned long sysctl_heap_stack_gap;
56784
56785 #include <linux/aio.h>
56786
56787 #ifdef CONFIG_MMU
56788+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
56789+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
56790 extern void arch_pick_mmap_layout(struct mm_struct *mm);
56791 extern unsigned long
56792 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
56793@@ -629,6 +633,17 @@ struct signal_struct {
56794 #ifdef CONFIG_TASKSTATS
56795 struct taskstats *stats;
56796 #endif
56797+
56798+#ifdef CONFIG_GRKERNSEC
56799+ u32 curr_ip;
56800+ u32 saved_ip;
56801+ u32 gr_saddr;
56802+ u32 gr_daddr;
56803+ u16 gr_sport;
56804+ u16 gr_dport;
56805+ u8 used_accept:1;
56806+#endif
56807+
56808 #ifdef CONFIG_AUDIT
56809 unsigned audit_tty;
56810 struct tty_audit_buf *tty_audit_buf;
56811@@ -710,6 +725,11 @@ struct user_struct {
56812 struct key *session_keyring; /* UID's default session keyring */
56813 #endif
56814
56815+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56816+ unsigned int banned;
56817+ unsigned long ban_expires;
56818+#endif
56819+
56820 /* Hash table maintenance information */
56821 struct hlist_node uidhash_node;
56822 uid_t uid;
56823@@ -1340,8 +1360,8 @@ struct task_struct {
56824 struct list_head thread_group;
56825
56826 struct completion *vfork_done; /* for vfork() */
56827- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
56828- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
56829+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
56830+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
56831
56832 cputime_t utime, stime, utimescaled, stimescaled;
56833 cputime_t gtime;
56834@@ -1357,13 +1377,6 @@ struct task_struct {
56835 struct task_cputime cputime_expires;
56836 struct list_head cpu_timers[3];
56837
56838-/* process credentials */
56839- const struct cred __rcu *real_cred; /* objective and real subjective task
56840- * credentials (COW) */
56841- const struct cred __rcu *cred; /* effective (overridable) subjective task
56842- * credentials (COW) */
56843- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
56844-
56845 char comm[TASK_COMM_LEN]; /* executable name excluding path
56846 - access with [gs]et_task_comm (which lock
56847 it with task_lock())
56848@@ -1380,8 +1393,16 @@ struct task_struct {
56849 #endif
56850 /* CPU-specific state of this task */
56851 struct thread_struct thread;
56852+/* thread_info moved to task_struct */
56853+#ifdef CONFIG_X86
56854+ struct thread_info tinfo;
56855+#endif
56856 /* filesystem information */
56857 struct fs_struct *fs;
56858+
56859+ const struct cred __rcu *cred; /* effective (overridable) subjective task
56860+ * credentials (COW) */
56861+
56862 /* open file information */
56863 struct files_struct *files;
56864 /* namespaces */
56865@@ -1428,6 +1449,11 @@ struct task_struct {
56866 struct rt_mutex_waiter *pi_blocked_on;
56867 #endif
56868
56869+/* process credentials */
56870+ const struct cred __rcu *real_cred; /* objective and real subjective task
56871+ * credentials (COW) */
56872+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
56873+
56874 #ifdef CONFIG_DEBUG_MUTEXES
56875 /* mutex deadlock detection */
56876 struct mutex_waiter *blocked_on;
56877@@ -1538,6 +1564,21 @@ struct task_struct {
56878 unsigned long default_timer_slack_ns;
56879
56880 struct list_head *scm_work_list;
56881+
56882+#ifdef CONFIG_GRKERNSEC
56883+ /* grsecurity */
56884+ struct dentry *gr_chroot_dentry;
56885+ struct acl_subject_label *acl;
56886+ struct acl_role_label *role;
56887+ struct file *exec_file;
56888+ u16 acl_role_id;
56889+ /* is this the task that authenticated to the special role */
56890+ u8 acl_sp_role;
56891+ u8 is_writable;
56892+ u8 brute;
56893+ u8 gr_is_chrooted;
56894+#endif
56895+
56896 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
56897 /* Index of current stored address in ret_stack */
56898 int curr_ret_stack;
56899@@ -1572,6 +1613,57 @@ struct task_struct {
56900 #endif
56901 };
56902
56903+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
56904+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
56905+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
56906+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
56907+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
56908+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
56909+
56910+#ifdef CONFIG_PAX_SOFTMODE
56911+extern int pax_softmode;
56912+#endif
56913+
56914+extern int pax_check_flags(unsigned long *);
56915+
56916+/* if tsk != current then task_lock must be held on it */
56917+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
56918+static inline unsigned long pax_get_flags(struct task_struct *tsk)
56919+{
56920+ if (likely(tsk->mm))
56921+ return tsk->mm->pax_flags;
56922+ else
56923+ return 0UL;
56924+}
56925+
56926+/* if tsk != current then task_lock must be held on it */
56927+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
56928+{
56929+ if (likely(tsk->mm)) {
56930+ tsk->mm->pax_flags = flags;
56931+ return 0;
56932+ }
56933+ return -EINVAL;
56934+}
56935+#endif
56936+
56937+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
56938+extern void pax_set_initial_flags(struct linux_binprm *bprm);
56939+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
56940+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
56941+#endif
56942+
56943+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
56944+extern void pax_report_insns(void *pc, void *sp);
56945+extern void pax_report_refcount_overflow(struct pt_regs *regs);
56946+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
56947+
56948+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
56949+extern void pax_track_stack(void);
56950+#else
56951+static inline void pax_track_stack(void) {}
56952+#endif
56953+
56954 /* Future-safe accessor for struct task_struct's cpus_allowed. */
56955 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
56956
56957@@ -1768,6 +1860,7 @@ extern void thread_group_times(struct ta
56958 #define PF_DUMPCORE 0x00000200 /* dumped core */
56959 #define PF_SIGNALED 0x00000400 /* killed by a signal */
56960 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
56961+#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
56962 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
56963 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
56964 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
56965@@ -2056,7 +2149,9 @@ void yield(void);
56966 extern struct exec_domain default_exec_domain;
56967
56968 union thread_union {
56969+#ifndef CONFIG_X86
56970 struct thread_info thread_info;
56971+#endif
56972 unsigned long stack[THREAD_SIZE/sizeof(long)];
56973 };
56974
56975@@ -2089,6 +2184,7 @@ extern struct pid_namespace init_pid_ns;
56976 */
56977
56978 extern struct task_struct *find_task_by_vpid(pid_t nr);
56979+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
56980 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
56981 struct pid_namespace *ns);
56982
56983@@ -2225,7 +2321,7 @@ extern void __cleanup_sighand(struct sig
56984 extern void exit_itimers(struct signal_struct *);
56985 extern void flush_itimer_signals(void);
56986
56987-extern NORET_TYPE void do_group_exit(int);
56988+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
56989
56990 extern void daemonize(const char *, ...);
56991 extern int allow_signal(int);
56992@@ -2393,13 +2489,17 @@ static inline unsigned long *end_of_stac
56993
56994 #endif
56995
56996-static inline int object_is_on_stack(void *obj)
56997+static inline int object_starts_on_stack(void *obj)
56998 {
56999- void *stack = task_stack_page(current);
57000+ const void *stack = task_stack_page(current);
57001
57002 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
57003 }
57004
57005+#ifdef CONFIG_PAX_USERCOPY
57006+extern int object_is_on_stack(const void *obj, unsigned long len);
57007+#endif
57008+
57009 extern void thread_info_cache_init(void);
57010
57011 #ifdef CONFIG_DEBUG_STACK_USAGE
57012diff -urNp linux-3.0.4/include/linux/screen_info.h linux-3.0.4/include/linux/screen_info.h
57013--- linux-3.0.4/include/linux/screen_info.h 2011-07-21 22:17:23.000000000 -0400
57014+++ linux-3.0.4/include/linux/screen_info.h 2011-08-23 21:47:56.000000000 -0400
57015@@ -43,7 +43,8 @@ struct screen_info {
57016 __u16 pages; /* 0x32 */
57017 __u16 vesa_attributes; /* 0x34 */
57018 __u32 capabilities; /* 0x36 */
57019- __u8 _reserved[6]; /* 0x3a */
57020+ __u16 vesapm_size; /* 0x3a */
57021+ __u8 _reserved[4]; /* 0x3c */
57022 } __attribute__((packed));
57023
57024 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
57025diff -urNp linux-3.0.4/include/linux/security.h linux-3.0.4/include/linux/security.h
57026--- linux-3.0.4/include/linux/security.h 2011-07-21 22:17:23.000000000 -0400
57027+++ linux-3.0.4/include/linux/security.h 2011-08-23 21:48:14.000000000 -0400
57028@@ -36,6 +36,7 @@
57029 #include <linux/key.h>
57030 #include <linux/xfrm.h>
57031 #include <linux/slab.h>
57032+#include <linux/grsecurity.h>
57033 #include <net/flow.h>
57034
57035 /* Maximum number of letters for an LSM name string */
57036diff -urNp linux-3.0.4/include/linux/seq_file.h linux-3.0.4/include/linux/seq_file.h
57037--- linux-3.0.4/include/linux/seq_file.h 2011-07-21 22:17:23.000000000 -0400
57038+++ linux-3.0.4/include/linux/seq_file.h 2011-08-23 21:47:56.000000000 -0400
57039@@ -32,6 +32,7 @@ struct seq_operations {
57040 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
57041 int (*show) (struct seq_file *m, void *v);
57042 };
57043+typedef struct seq_operations __no_const seq_operations_no_const;
57044
57045 #define SEQ_SKIP 1
57046
57047diff -urNp linux-3.0.4/include/linux/shmem_fs.h linux-3.0.4/include/linux/shmem_fs.h
57048--- linux-3.0.4/include/linux/shmem_fs.h 2011-07-21 22:17:23.000000000 -0400
57049+++ linux-3.0.4/include/linux/shmem_fs.h 2011-08-23 21:47:56.000000000 -0400
57050@@ -10,7 +10,7 @@
57051
57052 #define SHMEM_NR_DIRECT 16
57053
57054-#define SHMEM_SYMLINK_INLINE_LEN (SHMEM_NR_DIRECT * sizeof(swp_entry_t))
57055+#define SHMEM_SYMLINK_INLINE_LEN 64
57056
57057 struct shmem_inode_info {
57058 spinlock_t lock;
57059diff -urNp linux-3.0.4/include/linux/shm.h linux-3.0.4/include/linux/shm.h
57060--- linux-3.0.4/include/linux/shm.h 2011-07-21 22:17:23.000000000 -0400
57061+++ linux-3.0.4/include/linux/shm.h 2011-08-23 21:48:14.000000000 -0400
57062@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
57063 pid_t shm_cprid;
57064 pid_t shm_lprid;
57065 struct user_struct *mlock_user;
57066+#ifdef CONFIG_GRKERNSEC
57067+ time_t shm_createtime;
57068+ pid_t shm_lapid;
57069+#endif
57070 };
57071
57072 /* shm_mode upper byte flags */
57073diff -urNp linux-3.0.4/include/linux/skbuff.h linux-3.0.4/include/linux/skbuff.h
57074--- linux-3.0.4/include/linux/skbuff.h 2011-07-21 22:17:23.000000000 -0400
57075+++ linux-3.0.4/include/linux/skbuff.h 2011-08-23 21:47:56.000000000 -0400
57076@@ -592,7 +592,7 @@ static inline struct skb_shared_hwtstamp
57077 */
57078 static inline int skb_queue_empty(const struct sk_buff_head *list)
57079 {
57080- return list->next == (struct sk_buff *)list;
57081+ return list->next == (const struct sk_buff *)list;
57082 }
57083
57084 /**
57085@@ -605,7 +605,7 @@ static inline int skb_queue_empty(const
57086 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
57087 const struct sk_buff *skb)
57088 {
57089- return skb->next == (struct sk_buff *)list;
57090+ return skb->next == (const struct sk_buff *)list;
57091 }
57092
57093 /**
57094@@ -618,7 +618,7 @@ static inline bool skb_queue_is_last(con
57095 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
57096 const struct sk_buff *skb)
57097 {
57098- return skb->prev == (struct sk_buff *)list;
57099+ return skb->prev == (const struct sk_buff *)list;
57100 }
57101
57102 /**
57103@@ -1440,7 +1440,7 @@ static inline int pskb_network_may_pull(
57104 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
57105 */
57106 #ifndef NET_SKB_PAD
57107-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
57108+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
57109 #endif
57110
57111 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
57112diff -urNp linux-3.0.4/include/linux/slab_def.h linux-3.0.4/include/linux/slab_def.h
57113--- linux-3.0.4/include/linux/slab_def.h 2011-07-21 22:17:23.000000000 -0400
57114+++ linux-3.0.4/include/linux/slab_def.h 2011-08-23 21:47:56.000000000 -0400
57115@@ -96,10 +96,10 @@ struct kmem_cache {
57116 unsigned long node_allocs;
57117 unsigned long node_frees;
57118 unsigned long node_overflow;
57119- atomic_t allochit;
57120- atomic_t allocmiss;
57121- atomic_t freehit;
57122- atomic_t freemiss;
57123+ atomic_unchecked_t allochit;
57124+ atomic_unchecked_t allocmiss;
57125+ atomic_unchecked_t freehit;
57126+ atomic_unchecked_t freemiss;
57127
57128 /*
57129 * If debugging is enabled, then the allocator can add additional
57130diff -urNp linux-3.0.4/include/linux/slab.h linux-3.0.4/include/linux/slab.h
57131--- linux-3.0.4/include/linux/slab.h 2011-07-21 22:17:23.000000000 -0400
57132+++ linux-3.0.4/include/linux/slab.h 2011-08-23 21:47:56.000000000 -0400
57133@@ -11,12 +11,20 @@
57134
57135 #include <linux/gfp.h>
57136 #include <linux/types.h>
57137+#include <linux/err.h>
57138
57139 /*
57140 * Flags to pass to kmem_cache_create().
57141 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
57142 */
57143 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
57144+
57145+#ifdef CONFIG_PAX_USERCOPY
57146+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
57147+#else
57148+#define SLAB_USERCOPY 0x00000000UL
57149+#endif
57150+
57151 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
57152 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
57153 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
57154@@ -87,10 +95,13 @@
57155 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
57156 * Both make kfree a no-op.
57157 */
57158-#define ZERO_SIZE_PTR ((void *)16)
57159+#define ZERO_SIZE_PTR \
57160+({ \
57161+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
57162+ (void *)(-MAX_ERRNO-1L); \
57163+})
57164
57165-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
57166- (unsigned long)ZERO_SIZE_PTR)
57167+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
57168
57169 /*
57170 * struct kmem_cache related prototypes
57171@@ -141,6 +152,7 @@ void * __must_check krealloc(const void
57172 void kfree(const void *);
57173 void kzfree(const void *);
57174 size_t ksize(const void *);
57175+void check_object_size(const void *ptr, unsigned long n, bool to);
57176
57177 /*
57178 * Allocator specific definitions. These are mainly used to establish optimized
57179@@ -333,4 +345,59 @@ static inline void *kzalloc_node(size_t
57180
57181 void __init kmem_cache_init_late(void);
57182
57183+#define kmalloc(x, y) \
57184+({ \
57185+ void *___retval; \
57186+ intoverflow_t ___x = (intoverflow_t)x; \
57187+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
57188+ ___retval = NULL; \
57189+ else \
57190+ ___retval = kmalloc((size_t)___x, (y)); \
57191+ ___retval; \
57192+})
57193+
57194+#define kmalloc_node(x, y, z) \
57195+({ \
57196+ void *___retval; \
57197+ intoverflow_t ___x = (intoverflow_t)x; \
57198+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
57199+ ___retval = NULL; \
57200+ else \
57201+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
57202+ ___retval; \
57203+})
57204+
57205+#define kzalloc(x, y) \
57206+({ \
57207+ void *___retval; \
57208+ intoverflow_t ___x = (intoverflow_t)x; \
57209+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
57210+ ___retval = NULL; \
57211+ else \
57212+ ___retval = kzalloc((size_t)___x, (y)); \
57213+ ___retval; \
57214+})
57215+
57216+#define __krealloc(x, y, z) \
57217+({ \
57218+ void *___retval; \
57219+ intoverflow_t ___y = (intoverflow_t)y; \
57220+ if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
57221+ ___retval = NULL; \
57222+ else \
57223+ ___retval = __krealloc((x), (size_t)___y, (z)); \
57224+ ___retval; \
57225+})
57226+
57227+#define krealloc(x, y, z) \
57228+({ \
57229+ void *___retval; \
57230+ intoverflow_t ___y = (intoverflow_t)y; \
57231+ if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
57232+ ___retval = NULL; \
57233+ else \
57234+ ___retval = krealloc((x), (size_t)___y, (z)); \
57235+ ___retval; \
57236+})
57237+
57238 #endif /* _LINUX_SLAB_H */
57239diff -urNp linux-3.0.4/include/linux/slub_def.h linux-3.0.4/include/linux/slub_def.h
57240--- linux-3.0.4/include/linux/slub_def.h 2011-07-21 22:17:23.000000000 -0400
57241+++ linux-3.0.4/include/linux/slub_def.h 2011-08-23 21:47:56.000000000 -0400
57242@@ -82,7 +82,7 @@ struct kmem_cache {
57243 struct kmem_cache_order_objects max;
57244 struct kmem_cache_order_objects min;
57245 gfp_t allocflags; /* gfp flags to use on each alloc */
57246- int refcount; /* Refcount for slab cache destroy */
57247+ atomic_t refcount; /* Refcount for slab cache destroy */
57248 void (*ctor)(void *);
57249 int inuse; /* Offset to metadata */
57250 int align; /* Alignment */
57251@@ -218,7 +218,7 @@ static __always_inline struct kmem_cache
57252 }
57253
57254 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
57255-void *__kmalloc(size_t size, gfp_t flags);
57256+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
57257
57258 static __always_inline void *
57259 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
57260diff -urNp linux-3.0.4/include/linux/sonet.h linux-3.0.4/include/linux/sonet.h
57261--- linux-3.0.4/include/linux/sonet.h 2011-07-21 22:17:23.000000000 -0400
57262+++ linux-3.0.4/include/linux/sonet.h 2011-08-23 21:47:56.000000000 -0400
57263@@ -61,7 +61,7 @@ struct sonet_stats {
57264 #include <asm/atomic.h>
57265
57266 struct k_sonet_stats {
57267-#define __HANDLE_ITEM(i) atomic_t i
57268+#define __HANDLE_ITEM(i) atomic_unchecked_t i
57269 __SONET_ITEMS
57270 #undef __HANDLE_ITEM
57271 };
57272diff -urNp linux-3.0.4/include/linux/sunrpc/clnt.h linux-3.0.4/include/linux/sunrpc/clnt.h
57273--- linux-3.0.4/include/linux/sunrpc/clnt.h 2011-07-21 22:17:23.000000000 -0400
57274+++ linux-3.0.4/include/linux/sunrpc/clnt.h 2011-08-23 21:47:56.000000000 -0400
57275@@ -169,9 +169,9 @@ static inline unsigned short rpc_get_por
57276 {
57277 switch (sap->sa_family) {
57278 case AF_INET:
57279- return ntohs(((struct sockaddr_in *)sap)->sin_port);
57280+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
57281 case AF_INET6:
57282- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
57283+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
57284 }
57285 return 0;
57286 }
57287@@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const
57288 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
57289 const struct sockaddr *src)
57290 {
57291- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
57292+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
57293 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
57294
57295 dsin->sin_family = ssin->sin_family;
57296@@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const
57297 if (sa->sa_family != AF_INET6)
57298 return 0;
57299
57300- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
57301+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
57302 }
57303
57304 #endif /* __KERNEL__ */
57305diff -urNp linux-3.0.4/include/linux/sunrpc/svc_rdma.h linux-3.0.4/include/linux/sunrpc/svc_rdma.h
57306--- linux-3.0.4/include/linux/sunrpc/svc_rdma.h 2011-07-21 22:17:23.000000000 -0400
57307+++ linux-3.0.4/include/linux/sunrpc/svc_rdma.h 2011-08-23 21:47:56.000000000 -0400
57308@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
57309 extern unsigned int svcrdma_max_requests;
57310 extern unsigned int svcrdma_max_req_size;
57311
57312-extern atomic_t rdma_stat_recv;
57313-extern atomic_t rdma_stat_read;
57314-extern atomic_t rdma_stat_write;
57315-extern atomic_t rdma_stat_sq_starve;
57316-extern atomic_t rdma_stat_rq_starve;
57317-extern atomic_t rdma_stat_rq_poll;
57318-extern atomic_t rdma_stat_rq_prod;
57319-extern atomic_t rdma_stat_sq_poll;
57320-extern atomic_t rdma_stat_sq_prod;
57321+extern atomic_unchecked_t rdma_stat_recv;
57322+extern atomic_unchecked_t rdma_stat_read;
57323+extern atomic_unchecked_t rdma_stat_write;
57324+extern atomic_unchecked_t rdma_stat_sq_starve;
57325+extern atomic_unchecked_t rdma_stat_rq_starve;
57326+extern atomic_unchecked_t rdma_stat_rq_poll;
57327+extern atomic_unchecked_t rdma_stat_rq_prod;
57328+extern atomic_unchecked_t rdma_stat_sq_poll;
57329+extern atomic_unchecked_t rdma_stat_sq_prod;
57330
57331 #define RPCRDMA_VERSION 1
57332
57333diff -urNp linux-3.0.4/include/linux/sysctl.h linux-3.0.4/include/linux/sysctl.h
57334--- linux-3.0.4/include/linux/sysctl.h 2011-07-21 22:17:23.000000000 -0400
57335+++ linux-3.0.4/include/linux/sysctl.h 2011-08-23 21:48:14.000000000 -0400
57336@@ -155,7 +155,11 @@ enum
57337 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
57338 };
57339
57340-
57341+#ifdef CONFIG_PAX_SOFTMODE
57342+enum {
57343+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
57344+};
57345+#endif
57346
57347 /* CTL_VM names: */
57348 enum
57349@@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_tab
57350
57351 extern int proc_dostring(struct ctl_table *, int,
57352 void __user *, size_t *, loff_t *);
57353+extern int proc_dostring_modpriv(struct ctl_table *, int,
57354+ void __user *, size_t *, loff_t *);
57355 extern int proc_dointvec(struct ctl_table *, int,
57356 void __user *, size_t *, loff_t *);
57357 extern int proc_dointvec_minmax(struct ctl_table *, int,
57358diff -urNp linux-3.0.4/include/linux/tty_ldisc.h linux-3.0.4/include/linux/tty_ldisc.h
57359--- linux-3.0.4/include/linux/tty_ldisc.h 2011-07-21 22:17:23.000000000 -0400
57360+++ linux-3.0.4/include/linux/tty_ldisc.h 2011-08-23 21:47:56.000000000 -0400
57361@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
57362
57363 struct module *owner;
57364
57365- int refcount;
57366+ atomic_t refcount;
57367 };
57368
57369 struct tty_ldisc {
57370diff -urNp linux-3.0.4/include/linux/types.h linux-3.0.4/include/linux/types.h
57371--- linux-3.0.4/include/linux/types.h 2011-07-21 22:17:23.000000000 -0400
57372+++ linux-3.0.4/include/linux/types.h 2011-08-23 21:47:56.000000000 -0400
57373@@ -213,10 +213,26 @@ typedef struct {
57374 int counter;
57375 } atomic_t;
57376
57377+#ifdef CONFIG_PAX_REFCOUNT
57378+typedef struct {
57379+ int counter;
57380+} atomic_unchecked_t;
57381+#else
57382+typedef atomic_t atomic_unchecked_t;
57383+#endif
57384+
57385 #ifdef CONFIG_64BIT
57386 typedef struct {
57387 long counter;
57388 } atomic64_t;
57389+
57390+#ifdef CONFIG_PAX_REFCOUNT
57391+typedef struct {
57392+ long counter;
57393+} atomic64_unchecked_t;
57394+#else
57395+typedef atomic64_t atomic64_unchecked_t;
57396+#endif
57397 #endif
57398
57399 struct list_head {
57400diff -urNp linux-3.0.4/include/linux/uaccess.h linux-3.0.4/include/linux/uaccess.h
57401--- linux-3.0.4/include/linux/uaccess.h 2011-07-21 22:17:23.000000000 -0400
57402+++ linux-3.0.4/include/linux/uaccess.h 2011-08-23 21:47:56.000000000 -0400
57403@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
57404 long ret; \
57405 mm_segment_t old_fs = get_fs(); \
57406 \
57407- set_fs(KERNEL_DS); \
57408 pagefault_disable(); \
57409+ set_fs(KERNEL_DS); \
57410 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
57411- pagefault_enable(); \
57412 set_fs(old_fs); \
57413+ pagefault_enable(); \
57414 ret; \
57415 })
57416
57417diff -urNp linux-3.0.4/include/linux/unaligned/access_ok.h linux-3.0.4/include/linux/unaligned/access_ok.h
57418--- linux-3.0.4/include/linux/unaligned/access_ok.h 2011-07-21 22:17:23.000000000 -0400
57419+++ linux-3.0.4/include/linux/unaligned/access_ok.h 2011-08-23 21:47:56.000000000 -0400
57420@@ -6,32 +6,32 @@
57421
57422 static inline u16 get_unaligned_le16(const void *p)
57423 {
57424- return le16_to_cpup((__le16 *)p);
57425+ return le16_to_cpup((const __le16 *)p);
57426 }
57427
57428 static inline u32 get_unaligned_le32(const void *p)
57429 {
57430- return le32_to_cpup((__le32 *)p);
57431+ return le32_to_cpup((const __le32 *)p);
57432 }
57433
57434 static inline u64 get_unaligned_le64(const void *p)
57435 {
57436- return le64_to_cpup((__le64 *)p);
57437+ return le64_to_cpup((const __le64 *)p);
57438 }
57439
57440 static inline u16 get_unaligned_be16(const void *p)
57441 {
57442- return be16_to_cpup((__be16 *)p);
57443+ return be16_to_cpup((const __be16 *)p);
57444 }
57445
57446 static inline u32 get_unaligned_be32(const void *p)
57447 {
57448- return be32_to_cpup((__be32 *)p);
57449+ return be32_to_cpup((const __be32 *)p);
57450 }
57451
57452 static inline u64 get_unaligned_be64(const void *p)
57453 {
57454- return be64_to_cpup((__be64 *)p);
57455+ return be64_to_cpup((const __be64 *)p);
57456 }
57457
57458 static inline void put_unaligned_le16(u16 val, void *p)
57459diff -urNp linux-3.0.4/include/linux/vmalloc.h linux-3.0.4/include/linux/vmalloc.h
57460--- linux-3.0.4/include/linux/vmalloc.h 2011-07-21 22:17:23.000000000 -0400
57461+++ linux-3.0.4/include/linux/vmalloc.h 2011-08-23 21:47:56.000000000 -0400
57462@@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
57463 #define VM_MAP 0x00000004 /* vmap()ed pages */
57464 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
57465 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
57466+
57467+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
57468+#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
57469+#endif
57470+
57471 /* bits [20..32] reserved for arch specific ioremap internals */
57472
57473 /*
57474@@ -155,4 +160,103 @@ pcpu_free_vm_areas(struct vm_struct **vm
57475 # endif
57476 #endif
57477
57478+#define vmalloc(x) \
57479+({ \
57480+ void *___retval; \
57481+ intoverflow_t ___x = (intoverflow_t)x; \
57482+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
57483+ ___retval = NULL; \
57484+ else \
57485+ ___retval = vmalloc((unsigned long)___x); \
57486+ ___retval; \
57487+})
57488+
57489+#define vzalloc(x) \
57490+({ \
57491+ void *___retval; \
57492+ intoverflow_t ___x = (intoverflow_t)x; \
57493+ if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
57494+ ___retval = NULL; \
57495+ else \
57496+ ___retval = vzalloc((unsigned long)___x); \
57497+ ___retval; \
57498+})
57499+
57500+#define __vmalloc(x, y, z) \
57501+({ \
57502+ void *___retval; \
57503+ intoverflow_t ___x = (intoverflow_t)x; \
57504+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
57505+ ___retval = NULL; \
57506+ else \
57507+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
57508+ ___retval; \
57509+})
57510+
57511+#define vmalloc_user(x) \
57512+({ \
57513+ void *___retval; \
57514+ intoverflow_t ___x = (intoverflow_t)x; \
57515+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
57516+ ___retval = NULL; \
57517+ else \
57518+ ___retval = vmalloc_user((unsigned long)___x); \
57519+ ___retval; \
57520+})
57521+
57522+#define vmalloc_exec(x) \
57523+({ \
57524+ void *___retval; \
57525+ intoverflow_t ___x = (intoverflow_t)x; \
57526+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
57527+ ___retval = NULL; \
57528+ else \
57529+ ___retval = vmalloc_exec((unsigned long)___x); \
57530+ ___retval; \
57531+})
57532+
57533+#define vmalloc_node(x, y) \
57534+({ \
57535+ void *___retval; \
57536+ intoverflow_t ___x = (intoverflow_t)x; \
57537+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
57538+ ___retval = NULL; \
57539+ else \
57540+ ___retval = vmalloc_node((unsigned long)___x, (y));\
57541+ ___retval; \
57542+})
57543+
57544+#define vzalloc_node(x, y) \
57545+({ \
57546+ void *___retval; \
57547+ intoverflow_t ___x = (intoverflow_t)x; \
57548+ if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
57549+ ___retval = NULL; \
57550+ else \
57551+ ___retval = vzalloc_node((unsigned long)___x, (y));\
57552+ ___retval; \
57553+})
57554+
57555+#define vmalloc_32(x) \
57556+({ \
57557+ void *___retval; \
57558+ intoverflow_t ___x = (intoverflow_t)x; \
57559+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
57560+ ___retval = NULL; \
57561+ else \
57562+ ___retval = vmalloc_32((unsigned long)___x); \
57563+ ___retval; \
57564+})
57565+
57566+#define vmalloc_32_user(x) \
57567+({ \
57568+void *___retval; \
57569+ intoverflow_t ___x = (intoverflow_t)x; \
57570+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
57571+ ___retval = NULL; \
57572+ else \
57573+ ___retval = vmalloc_32_user((unsigned long)___x);\
57574+ ___retval; \
57575+})
57576+
57577 #endif /* _LINUX_VMALLOC_H */
57578diff -urNp linux-3.0.4/include/linux/vmstat.h linux-3.0.4/include/linux/vmstat.h
57579--- linux-3.0.4/include/linux/vmstat.h 2011-07-21 22:17:23.000000000 -0400
57580+++ linux-3.0.4/include/linux/vmstat.h 2011-08-23 21:47:56.000000000 -0400
57581@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(in
57582 /*
57583 * Zone based page accounting with per cpu differentials.
57584 */
57585-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
57586+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
57587
57588 static inline void zone_page_state_add(long x, struct zone *zone,
57589 enum zone_stat_item item)
57590 {
57591- atomic_long_add(x, &zone->vm_stat[item]);
57592- atomic_long_add(x, &vm_stat[item]);
57593+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
57594+ atomic_long_add_unchecked(x, &vm_stat[item]);
57595 }
57596
57597 static inline unsigned long global_page_state(enum zone_stat_item item)
57598 {
57599- long x = atomic_long_read(&vm_stat[item]);
57600+ long x = atomic_long_read_unchecked(&vm_stat[item]);
57601 #ifdef CONFIG_SMP
57602 if (x < 0)
57603 x = 0;
57604@@ -109,7 +109,7 @@ static inline unsigned long global_page_
57605 static inline unsigned long zone_page_state(struct zone *zone,
57606 enum zone_stat_item item)
57607 {
57608- long x = atomic_long_read(&zone->vm_stat[item]);
57609+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
57610 #ifdef CONFIG_SMP
57611 if (x < 0)
57612 x = 0;
57613@@ -126,7 +126,7 @@ static inline unsigned long zone_page_st
57614 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
57615 enum zone_stat_item item)
57616 {
57617- long x = atomic_long_read(&zone->vm_stat[item]);
57618+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
57619
57620 #ifdef CONFIG_SMP
57621 int cpu;
57622@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state
57623
57624 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
57625 {
57626- atomic_long_inc(&zone->vm_stat[item]);
57627- atomic_long_inc(&vm_stat[item]);
57628+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
57629+ atomic_long_inc_unchecked(&vm_stat[item]);
57630 }
57631
57632 static inline void __inc_zone_page_state(struct page *page,
57633@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state
57634
57635 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
57636 {
57637- atomic_long_dec(&zone->vm_stat[item]);
57638- atomic_long_dec(&vm_stat[item]);
57639+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
57640+ atomic_long_dec_unchecked(&vm_stat[item]);
57641 }
57642
57643 static inline void __dec_zone_page_state(struct page *page,
57644diff -urNp linux-3.0.4/include/media/saa7146_vv.h linux-3.0.4/include/media/saa7146_vv.h
57645--- linux-3.0.4/include/media/saa7146_vv.h 2011-07-21 22:17:23.000000000 -0400
57646+++ linux-3.0.4/include/media/saa7146_vv.h 2011-08-24 18:26:09.000000000 -0400
57647@@ -163,7 +163,7 @@ struct saa7146_ext_vv
57648 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
57649
57650 /* the extension can override this */
57651- struct v4l2_ioctl_ops ops;
57652+ v4l2_ioctl_ops_no_const ops;
57653 /* pointer to the saa7146 core ops */
57654 const struct v4l2_ioctl_ops *core_ops;
57655
57656diff -urNp linux-3.0.4/include/media/v4l2-ioctl.h linux-3.0.4/include/media/v4l2-ioctl.h
57657--- linux-3.0.4/include/media/v4l2-ioctl.h 2011-07-21 22:17:23.000000000 -0400
57658+++ linux-3.0.4/include/media/v4l2-ioctl.h 2011-08-24 18:25:45.000000000 -0400
57659@@ -272,6 +272,7 @@ struct v4l2_ioctl_ops {
57660 long (*vidioc_default) (struct file *file, void *fh,
57661 bool valid_prio, int cmd, void *arg);
57662 };
57663+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
57664
57665
57666 /* v4l debugging and diagnostics */
57667diff -urNp linux-3.0.4/include/net/caif/cfctrl.h linux-3.0.4/include/net/caif/cfctrl.h
57668--- linux-3.0.4/include/net/caif/cfctrl.h 2011-07-21 22:17:23.000000000 -0400
57669+++ linux-3.0.4/include/net/caif/cfctrl.h 2011-08-23 21:47:56.000000000 -0400
57670@@ -52,7 +52,7 @@ struct cfctrl_rsp {
57671 void (*radioset_rsp)(void);
57672 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
57673 struct cflayer *client_layer);
57674-};
57675+} __no_const;
57676
57677 /* Link Setup Parameters for CAIF-Links. */
57678 struct cfctrl_link_param {
57679@@ -101,8 +101,8 @@ struct cfctrl_request_info {
57680 struct cfctrl {
57681 struct cfsrvl serv;
57682 struct cfctrl_rsp res;
57683- atomic_t req_seq_no;
57684- atomic_t rsp_seq_no;
57685+ atomic_unchecked_t req_seq_no;
57686+ atomic_unchecked_t rsp_seq_no;
57687 struct list_head list;
57688 /* Protects from simultaneous access to first_req list */
57689 spinlock_t info_list_lock;
57690diff -urNp linux-3.0.4/include/net/flow.h linux-3.0.4/include/net/flow.h
57691--- linux-3.0.4/include/net/flow.h 2011-07-21 22:17:23.000000000 -0400
57692+++ linux-3.0.4/include/net/flow.h 2011-08-23 21:47:56.000000000 -0400
57693@@ -188,6 +188,6 @@ extern struct flow_cache_object *flow_ca
57694 u8 dir, flow_resolve_t resolver, void *ctx);
57695
57696 extern void flow_cache_flush(void);
57697-extern atomic_t flow_cache_genid;
57698+extern atomic_unchecked_t flow_cache_genid;
57699
57700 #endif
57701diff -urNp linux-3.0.4/include/net/inetpeer.h linux-3.0.4/include/net/inetpeer.h
57702--- linux-3.0.4/include/net/inetpeer.h 2011-07-21 22:17:23.000000000 -0400
57703+++ linux-3.0.4/include/net/inetpeer.h 2011-08-23 21:47:56.000000000 -0400
57704@@ -43,8 +43,8 @@ struct inet_peer {
57705 */
57706 union {
57707 struct {
57708- atomic_t rid; /* Frag reception counter */
57709- atomic_t ip_id_count; /* IP ID for the next packet */
57710+ atomic_unchecked_t rid; /* Frag reception counter */
57711+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
57712 __u32 tcp_ts;
57713 __u32 tcp_ts_stamp;
57714 u32 metrics[RTAX_MAX];
57715@@ -108,7 +108,7 @@ static inline __u16 inet_getid(struct in
57716 {
57717 more++;
57718 inet_peer_refcheck(p);
57719- return atomic_add_return(more, &p->ip_id_count) - more;
57720+ return atomic_add_return_unchecked(more, &p->ip_id_count) - more;
57721 }
57722
57723 #endif /* _NET_INETPEER_H */
57724diff -urNp linux-3.0.4/include/net/ip_fib.h linux-3.0.4/include/net/ip_fib.h
57725--- linux-3.0.4/include/net/ip_fib.h 2011-07-21 22:17:23.000000000 -0400
57726+++ linux-3.0.4/include/net/ip_fib.h 2011-08-23 21:47:56.000000000 -0400
57727@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(s
57728
57729 #define FIB_RES_SADDR(net, res) \
57730 ((FIB_RES_NH(res).nh_saddr_genid == \
57731- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
57732+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
57733 FIB_RES_NH(res).nh_saddr : \
57734 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
57735 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
57736diff -urNp linux-3.0.4/include/net/ip_vs.h linux-3.0.4/include/net/ip_vs.h
57737--- linux-3.0.4/include/net/ip_vs.h 2011-07-21 22:17:23.000000000 -0400
57738+++ linux-3.0.4/include/net/ip_vs.h 2011-08-23 21:47:56.000000000 -0400
57739@@ -509,7 +509,7 @@ struct ip_vs_conn {
57740 struct ip_vs_conn *control; /* Master control connection */
57741 atomic_t n_control; /* Number of controlled ones */
57742 struct ip_vs_dest *dest; /* real server */
57743- atomic_t in_pkts; /* incoming packet counter */
57744+ atomic_unchecked_t in_pkts; /* incoming packet counter */
57745
57746 /* packet transmitter for different forwarding methods. If it
57747 mangles the packet, it must return NF_DROP or better NF_STOLEN,
57748@@ -647,7 +647,7 @@ struct ip_vs_dest {
57749 __be16 port; /* port number of the server */
57750 union nf_inet_addr addr; /* IP address of the server */
57751 volatile unsigned flags; /* dest status flags */
57752- atomic_t conn_flags; /* flags to copy to conn */
57753+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
57754 atomic_t weight; /* server weight */
57755
57756 atomic_t refcnt; /* reference counter */
57757diff -urNp linux-3.0.4/include/net/irda/ircomm_core.h linux-3.0.4/include/net/irda/ircomm_core.h
57758--- linux-3.0.4/include/net/irda/ircomm_core.h 2011-07-21 22:17:23.000000000 -0400
57759+++ linux-3.0.4/include/net/irda/ircomm_core.h 2011-08-23 21:47:56.000000000 -0400
57760@@ -51,7 +51,7 @@ typedef struct {
57761 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
57762 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
57763 struct ircomm_info *);
57764-} call_t;
57765+} __no_const call_t;
57766
57767 struct ircomm_cb {
57768 irda_queue_t queue;
57769diff -urNp linux-3.0.4/include/net/irda/ircomm_tty.h linux-3.0.4/include/net/irda/ircomm_tty.h
57770--- linux-3.0.4/include/net/irda/ircomm_tty.h 2011-07-21 22:17:23.000000000 -0400
57771+++ linux-3.0.4/include/net/irda/ircomm_tty.h 2011-08-23 21:47:56.000000000 -0400
57772@@ -35,6 +35,7 @@
57773 #include <linux/termios.h>
57774 #include <linux/timer.h>
57775 #include <linux/tty.h> /* struct tty_struct */
57776+#include <asm/local.h>
57777
57778 #include <net/irda/irias_object.h>
57779 #include <net/irda/ircomm_core.h>
57780@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
57781 unsigned short close_delay;
57782 unsigned short closing_wait; /* time to wait before closing */
57783
57784- int open_count;
57785- int blocked_open; /* # of blocked opens */
57786+ local_t open_count;
57787+ local_t blocked_open; /* # of blocked opens */
57788
57789 /* Protect concurent access to :
57790 * o self->open_count
57791diff -urNp linux-3.0.4/include/net/iucv/af_iucv.h linux-3.0.4/include/net/iucv/af_iucv.h
57792--- linux-3.0.4/include/net/iucv/af_iucv.h 2011-07-21 22:17:23.000000000 -0400
57793+++ linux-3.0.4/include/net/iucv/af_iucv.h 2011-08-23 21:47:56.000000000 -0400
57794@@ -87,7 +87,7 @@ struct iucv_sock {
57795 struct iucv_sock_list {
57796 struct hlist_head head;
57797 rwlock_t lock;
57798- atomic_t autobind_name;
57799+ atomic_unchecked_t autobind_name;
57800 };
57801
57802 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
57803diff -urNp linux-3.0.4/include/net/lapb.h linux-3.0.4/include/net/lapb.h
57804--- linux-3.0.4/include/net/lapb.h 2011-07-21 22:17:23.000000000 -0400
57805+++ linux-3.0.4/include/net/lapb.h 2011-08-23 21:47:56.000000000 -0400
57806@@ -95,7 +95,7 @@ struct lapb_cb {
57807 struct sk_buff_head write_queue;
57808 struct sk_buff_head ack_queue;
57809 unsigned char window;
57810- struct lapb_register_struct callbacks;
57811+ struct lapb_register_struct *callbacks;
57812
57813 /* FRMR control information */
57814 struct lapb_frame frmr_data;
57815diff -urNp linux-3.0.4/include/net/neighbour.h linux-3.0.4/include/net/neighbour.h
57816--- linux-3.0.4/include/net/neighbour.h 2011-07-21 22:17:23.000000000 -0400
57817+++ linux-3.0.4/include/net/neighbour.h 2011-08-26 19:49:56.000000000 -0400
57818@@ -117,14 +117,14 @@ struct neighbour {
57819 };
57820
57821 struct neigh_ops {
57822- int family;
57823+ const int family;
57824 void (*solicit)(struct neighbour *, struct sk_buff*);
57825 void (*error_report)(struct neighbour *, struct sk_buff*);
57826 int (*output)(struct sk_buff*);
57827 int (*connected_output)(struct sk_buff*);
57828 int (*hh_output)(struct sk_buff*);
57829 int (*queue_xmit)(struct sk_buff*);
57830-};
57831+} __do_const;
57832
57833 struct pneigh_entry {
57834 struct pneigh_entry *next;
57835diff -urNp linux-3.0.4/include/net/netlink.h linux-3.0.4/include/net/netlink.h
57836--- linux-3.0.4/include/net/netlink.h 2011-07-21 22:17:23.000000000 -0400
57837+++ linux-3.0.4/include/net/netlink.h 2011-08-23 21:47:56.000000000 -0400
57838@@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct
57839 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
57840 {
57841 if (mark)
57842- skb_trim(skb, (unsigned char *) mark - skb->data);
57843+ skb_trim(skb, (const unsigned char *) mark - skb->data);
57844 }
57845
57846 /**
57847diff -urNp linux-3.0.4/include/net/netns/ipv4.h linux-3.0.4/include/net/netns/ipv4.h
57848--- linux-3.0.4/include/net/netns/ipv4.h 2011-07-21 22:17:23.000000000 -0400
57849+++ linux-3.0.4/include/net/netns/ipv4.h 2011-08-23 21:47:56.000000000 -0400
57850@@ -56,8 +56,8 @@ struct netns_ipv4 {
57851
57852 unsigned int sysctl_ping_group_range[2];
57853
57854- atomic_t rt_genid;
57855- atomic_t dev_addr_genid;
57856+ atomic_unchecked_t rt_genid;
57857+ atomic_unchecked_t dev_addr_genid;
57858
57859 #ifdef CONFIG_IP_MROUTE
57860 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
57861diff -urNp linux-3.0.4/include/net/sctp/sctp.h linux-3.0.4/include/net/sctp/sctp.h
57862--- linux-3.0.4/include/net/sctp/sctp.h 2011-07-21 22:17:23.000000000 -0400
57863+++ linux-3.0.4/include/net/sctp/sctp.h 2011-08-23 21:47:56.000000000 -0400
57864@@ -315,9 +315,9 @@ do { \
57865
57866 #else /* SCTP_DEBUG */
57867
57868-#define SCTP_DEBUG_PRINTK(whatever...)
57869-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
57870-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
57871+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
57872+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
57873+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
57874 #define SCTP_ENABLE_DEBUG
57875 #define SCTP_DISABLE_DEBUG
57876 #define SCTP_ASSERT(expr, str, func)
57877diff -urNp linux-3.0.4/include/net/sock.h linux-3.0.4/include/net/sock.h
57878--- linux-3.0.4/include/net/sock.h 2011-07-21 22:17:23.000000000 -0400
57879+++ linux-3.0.4/include/net/sock.h 2011-08-23 21:47:56.000000000 -0400
57880@@ -277,7 +277,7 @@ struct sock {
57881 #ifdef CONFIG_RPS
57882 __u32 sk_rxhash;
57883 #endif
57884- atomic_t sk_drops;
57885+ atomic_unchecked_t sk_drops;
57886 int sk_rcvbuf;
57887
57888 struct sk_filter __rcu *sk_filter;
57889@@ -1390,7 +1390,7 @@ static inline void sk_nocaps_add(struct
57890 }
57891
57892 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
57893- char __user *from, char *to,
57894+ char __user *from, unsigned char *to,
57895 int copy, int offset)
57896 {
57897 if (skb->ip_summed == CHECKSUM_NONE) {
57898diff -urNp linux-3.0.4/include/net/tcp.h linux-3.0.4/include/net/tcp.h
57899--- linux-3.0.4/include/net/tcp.h 2011-07-21 22:17:23.000000000 -0400
57900+++ linux-3.0.4/include/net/tcp.h 2011-08-23 21:47:56.000000000 -0400
57901@@ -1374,8 +1374,8 @@ enum tcp_seq_states {
57902 struct tcp_seq_afinfo {
57903 char *name;
57904 sa_family_t family;
57905- struct file_operations seq_fops;
57906- struct seq_operations seq_ops;
57907+ file_operations_no_const seq_fops;
57908+ seq_operations_no_const seq_ops;
57909 };
57910
57911 struct tcp_iter_state {
57912diff -urNp linux-3.0.4/include/net/udp.h linux-3.0.4/include/net/udp.h
57913--- linux-3.0.4/include/net/udp.h 2011-07-21 22:17:23.000000000 -0400
57914+++ linux-3.0.4/include/net/udp.h 2011-08-23 21:47:56.000000000 -0400
57915@@ -234,8 +234,8 @@ struct udp_seq_afinfo {
57916 char *name;
57917 sa_family_t family;
57918 struct udp_table *udp_table;
57919- struct file_operations seq_fops;
57920- struct seq_operations seq_ops;
57921+ file_operations_no_const seq_fops;
57922+ seq_operations_no_const seq_ops;
57923 };
57924
57925 struct udp_iter_state {
57926diff -urNp linux-3.0.4/include/net/xfrm.h linux-3.0.4/include/net/xfrm.h
57927--- linux-3.0.4/include/net/xfrm.h 2011-07-21 22:17:23.000000000 -0400
57928+++ linux-3.0.4/include/net/xfrm.h 2011-08-23 21:47:56.000000000 -0400
57929@@ -505,7 +505,7 @@ struct xfrm_policy {
57930 struct timer_list timer;
57931
57932 struct flow_cache_object flo;
57933- atomic_t genid;
57934+ atomic_unchecked_t genid;
57935 u32 priority;
57936 u32 index;
57937 struct xfrm_mark mark;
57938diff -urNp linux-3.0.4/include/rdma/iw_cm.h linux-3.0.4/include/rdma/iw_cm.h
57939--- linux-3.0.4/include/rdma/iw_cm.h 2011-07-21 22:17:23.000000000 -0400
57940+++ linux-3.0.4/include/rdma/iw_cm.h 2011-08-23 21:47:56.000000000 -0400
57941@@ -120,7 +120,7 @@ struct iw_cm_verbs {
57942 int backlog);
57943
57944 int (*destroy_listen)(struct iw_cm_id *cm_id);
57945-};
57946+} __no_const;
57947
57948 /**
57949 * iw_create_cm_id - Create an IW CM identifier.
57950diff -urNp linux-3.0.4/include/scsi/libfc.h linux-3.0.4/include/scsi/libfc.h
57951--- linux-3.0.4/include/scsi/libfc.h 2011-07-21 22:17:23.000000000 -0400
57952+++ linux-3.0.4/include/scsi/libfc.h 2011-08-23 21:47:56.000000000 -0400
57953@@ -750,6 +750,7 @@ struct libfc_function_template {
57954 */
57955 void (*disc_stop_final) (struct fc_lport *);
57956 };
57957+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
57958
57959 /**
57960 * struct fc_disc - Discovery context
57961@@ -853,7 +854,7 @@ struct fc_lport {
57962 struct fc_vport *vport;
57963
57964 /* Operational Information */
57965- struct libfc_function_template tt;
57966+ libfc_function_template_no_const tt;
57967 u8 link_up;
57968 u8 qfull;
57969 enum fc_lport_state state;
57970diff -urNp linux-3.0.4/include/scsi/scsi_device.h linux-3.0.4/include/scsi/scsi_device.h
57971--- linux-3.0.4/include/scsi/scsi_device.h 2011-07-21 22:17:23.000000000 -0400
57972+++ linux-3.0.4/include/scsi/scsi_device.h 2011-08-23 21:47:56.000000000 -0400
57973@@ -161,9 +161,9 @@ struct scsi_device {
57974 unsigned int max_device_blocked; /* what device_blocked counts down from */
57975 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
57976
57977- atomic_t iorequest_cnt;
57978- atomic_t iodone_cnt;
57979- atomic_t ioerr_cnt;
57980+ atomic_unchecked_t iorequest_cnt;
57981+ atomic_unchecked_t iodone_cnt;
57982+ atomic_unchecked_t ioerr_cnt;
57983
57984 struct device sdev_gendev,
57985 sdev_dev;
57986diff -urNp linux-3.0.4/include/scsi/scsi_transport_fc.h linux-3.0.4/include/scsi/scsi_transport_fc.h
57987--- linux-3.0.4/include/scsi/scsi_transport_fc.h 2011-07-21 22:17:23.000000000 -0400
57988+++ linux-3.0.4/include/scsi/scsi_transport_fc.h 2011-08-26 19:49:56.000000000 -0400
57989@@ -711,7 +711,7 @@ struct fc_function_template {
57990 unsigned long show_host_system_hostname:1;
57991
57992 unsigned long disable_target_scan:1;
57993-};
57994+} __do_const;
57995
57996
57997 /**
57998diff -urNp linux-3.0.4/include/sound/ak4xxx-adda.h linux-3.0.4/include/sound/ak4xxx-adda.h
57999--- linux-3.0.4/include/sound/ak4xxx-adda.h 2011-07-21 22:17:23.000000000 -0400
58000+++ linux-3.0.4/include/sound/ak4xxx-adda.h 2011-08-23 21:47:56.000000000 -0400
58001@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
58002 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
58003 unsigned char val);
58004 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
58005-};
58006+} __no_const;
58007
58008 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
58009
58010diff -urNp linux-3.0.4/include/sound/hwdep.h linux-3.0.4/include/sound/hwdep.h
58011--- linux-3.0.4/include/sound/hwdep.h 2011-07-21 22:17:23.000000000 -0400
58012+++ linux-3.0.4/include/sound/hwdep.h 2011-08-23 21:47:56.000000000 -0400
58013@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
58014 struct snd_hwdep_dsp_status *status);
58015 int (*dsp_load)(struct snd_hwdep *hw,
58016 struct snd_hwdep_dsp_image *image);
58017-};
58018+} __no_const;
58019
58020 struct snd_hwdep {
58021 struct snd_card *card;
58022diff -urNp linux-3.0.4/include/sound/info.h linux-3.0.4/include/sound/info.h
58023--- linux-3.0.4/include/sound/info.h 2011-07-21 22:17:23.000000000 -0400
58024+++ linux-3.0.4/include/sound/info.h 2011-08-23 21:47:56.000000000 -0400
58025@@ -44,7 +44,7 @@ struct snd_info_entry_text {
58026 struct snd_info_buffer *buffer);
58027 void (*write)(struct snd_info_entry *entry,
58028 struct snd_info_buffer *buffer);
58029-};
58030+} __no_const;
58031
58032 struct snd_info_entry_ops {
58033 int (*open)(struct snd_info_entry *entry,
58034diff -urNp linux-3.0.4/include/sound/pcm.h linux-3.0.4/include/sound/pcm.h
58035--- linux-3.0.4/include/sound/pcm.h 2011-07-21 22:17:23.000000000 -0400
58036+++ linux-3.0.4/include/sound/pcm.h 2011-08-23 21:47:56.000000000 -0400
58037@@ -81,6 +81,7 @@ struct snd_pcm_ops {
58038 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
58039 int (*ack)(struct snd_pcm_substream *substream);
58040 };
58041+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
58042
58043 /*
58044 *
58045diff -urNp linux-3.0.4/include/sound/sb16_csp.h linux-3.0.4/include/sound/sb16_csp.h
58046--- linux-3.0.4/include/sound/sb16_csp.h 2011-07-21 22:17:23.000000000 -0400
58047+++ linux-3.0.4/include/sound/sb16_csp.h 2011-08-23 21:47:56.000000000 -0400
58048@@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
58049 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
58050 int (*csp_stop) (struct snd_sb_csp * p);
58051 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
58052-};
58053+} __no_const;
58054
58055 /*
58056 * CSP private data
58057diff -urNp linux-3.0.4/include/sound/soc.h linux-3.0.4/include/sound/soc.h
58058--- linux-3.0.4/include/sound/soc.h 2011-07-21 22:17:23.000000000 -0400
58059+++ linux-3.0.4/include/sound/soc.h 2011-08-26 19:49:56.000000000 -0400
58060@@ -636,7 +636,7 @@ struct snd_soc_platform_driver {
58061
58062 /* platform stream ops */
58063 struct snd_pcm_ops *ops;
58064-};
58065+} __do_const;
58066
58067 struct snd_soc_platform {
58068 const char *name;
58069diff -urNp linux-3.0.4/include/sound/ymfpci.h linux-3.0.4/include/sound/ymfpci.h
58070--- linux-3.0.4/include/sound/ymfpci.h 2011-07-21 22:17:23.000000000 -0400
58071+++ linux-3.0.4/include/sound/ymfpci.h 2011-08-23 21:47:56.000000000 -0400
58072@@ -358,7 +358,7 @@ struct snd_ymfpci {
58073 spinlock_t reg_lock;
58074 spinlock_t voice_lock;
58075 wait_queue_head_t interrupt_sleep;
58076- atomic_t interrupt_sleep_count;
58077+ atomic_unchecked_t interrupt_sleep_count;
58078 struct snd_info_entry *proc_entry;
58079 const struct firmware *dsp_microcode;
58080 const struct firmware *controller_microcode;
58081diff -urNp linux-3.0.4/include/target/target_core_base.h linux-3.0.4/include/target/target_core_base.h
58082--- linux-3.0.4/include/target/target_core_base.h 2011-07-21 22:17:23.000000000 -0400
58083+++ linux-3.0.4/include/target/target_core_base.h 2011-08-23 21:47:56.000000000 -0400
58084@@ -364,7 +364,7 @@ struct t10_reservation_ops {
58085 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
58086 int (*t10_pr_register)(struct se_cmd *);
58087 int (*t10_pr_clear)(struct se_cmd *);
58088-};
58089+} __no_const;
58090
58091 struct t10_reservation_template {
58092 /* Reservation effects all target ports */
58093@@ -432,8 +432,8 @@ struct se_transport_task {
58094 atomic_t t_task_cdbs_left;
58095 atomic_t t_task_cdbs_ex_left;
58096 atomic_t t_task_cdbs_timeout_left;
58097- atomic_t t_task_cdbs_sent;
58098- atomic_t t_transport_aborted;
58099+ atomic_unchecked_t t_task_cdbs_sent;
58100+ atomic_unchecked_t t_transport_aborted;
58101 atomic_t t_transport_active;
58102 atomic_t t_transport_complete;
58103 atomic_t t_transport_queue_active;
58104@@ -774,7 +774,7 @@ struct se_device {
58105 atomic_t active_cmds;
58106 atomic_t simple_cmds;
58107 atomic_t depth_left;
58108- atomic_t dev_ordered_id;
58109+ atomic_unchecked_t dev_ordered_id;
58110 atomic_t dev_tur_active;
58111 atomic_t execute_tasks;
58112 atomic_t dev_status_thr_count;
58113diff -urNp linux-3.0.4/include/trace/events/irq.h linux-3.0.4/include/trace/events/irq.h
58114--- linux-3.0.4/include/trace/events/irq.h 2011-07-21 22:17:23.000000000 -0400
58115+++ linux-3.0.4/include/trace/events/irq.h 2011-08-23 21:47:56.000000000 -0400
58116@@ -36,7 +36,7 @@ struct softirq_action;
58117 */
58118 TRACE_EVENT(irq_handler_entry,
58119
58120- TP_PROTO(int irq, struct irqaction *action),
58121+ TP_PROTO(int irq, const struct irqaction *action),
58122
58123 TP_ARGS(irq, action),
58124
58125@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
58126 */
58127 TRACE_EVENT(irq_handler_exit,
58128
58129- TP_PROTO(int irq, struct irqaction *action, int ret),
58130+ TP_PROTO(int irq, const struct irqaction *action, int ret),
58131
58132 TP_ARGS(irq, action, ret),
58133
58134diff -urNp linux-3.0.4/include/video/udlfb.h linux-3.0.4/include/video/udlfb.h
58135--- linux-3.0.4/include/video/udlfb.h 2011-07-21 22:17:23.000000000 -0400
58136+++ linux-3.0.4/include/video/udlfb.h 2011-08-23 21:47:56.000000000 -0400
58137@@ -51,10 +51,10 @@ struct dlfb_data {
58138 int base8;
58139 u32 pseudo_palette[256];
58140 /* blit-only rendering path metrics, exposed through sysfs */
58141- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
58142- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
58143- atomic_t bytes_sent; /* to usb, after compression including overhead */
58144- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
58145+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
58146+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
58147+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
58148+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
58149 };
58150
58151 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
58152diff -urNp linux-3.0.4/include/video/uvesafb.h linux-3.0.4/include/video/uvesafb.h
58153--- linux-3.0.4/include/video/uvesafb.h 2011-07-21 22:17:23.000000000 -0400
58154+++ linux-3.0.4/include/video/uvesafb.h 2011-08-23 21:47:56.000000000 -0400
58155@@ -177,6 +177,7 @@ struct uvesafb_par {
58156 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
58157 u8 pmi_setpal; /* PMI for palette changes */
58158 u16 *pmi_base; /* protected mode interface location */
58159+ u8 *pmi_code; /* protected mode code location */
58160 void *pmi_start;
58161 void *pmi_pal;
58162 u8 *vbe_state_orig; /*
58163diff -urNp linux-3.0.4/init/do_mounts.c linux-3.0.4/init/do_mounts.c
58164--- linux-3.0.4/init/do_mounts.c 2011-07-21 22:17:23.000000000 -0400
58165+++ linux-3.0.4/init/do_mounts.c 2011-08-23 21:47:56.000000000 -0400
58166@@ -287,7 +287,7 @@ static void __init get_fs_names(char *pa
58167
58168 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
58169 {
58170- int err = sys_mount(name, "/root", fs, flags, data);
58171+ int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
58172 if (err)
58173 return err;
58174
58175@@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...
58176 va_start(args, fmt);
58177 vsprintf(buf, fmt, args);
58178 va_end(args);
58179- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
58180+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
58181 if (fd >= 0) {
58182 sys_ioctl(fd, FDEJECT, 0);
58183 sys_close(fd);
58184 }
58185 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
58186- fd = sys_open("/dev/console", O_RDWR, 0);
58187+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
58188 if (fd >= 0) {
58189 sys_ioctl(fd, TCGETS, (long)&termios);
58190 termios.c_lflag &= ~ICANON;
58191 sys_ioctl(fd, TCSETSF, (long)&termios);
58192- sys_read(fd, &c, 1);
58193+ sys_read(fd, (char __user *)&c, 1);
58194 termios.c_lflag |= ICANON;
58195 sys_ioctl(fd, TCSETSF, (long)&termios);
58196 sys_close(fd);
58197@@ -488,6 +488,6 @@ void __init prepare_namespace(void)
58198 mount_root();
58199 out:
58200 devtmpfs_mount("dev");
58201- sys_mount(".", "/", NULL, MS_MOVE, NULL);
58202+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
58203 sys_chroot((const char __user __force *)".");
58204 }
58205diff -urNp linux-3.0.4/init/do_mounts.h linux-3.0.4/init/do_mounts.h
58206--- linux-3.0.4/init/do_mounts.h 2011-07-21 22:17:23.000000000 -0400
58207+++ linux-3.0.4/init/do_mounts.h 2011-08-23 21:47:56.000000000 -0400
58208@@ -15,15 +15,15 @@ extern int root_mountflags;
58209
58210 static inline int create_dev(char *name, dev_t dev)
58211 {
58212- sys_unlink(name);
58213- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
58214+ sys_unlink((__force char __user *)name);
58215+ return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
58216 }
58217
58218 #if BITS_PER_LONG == 32
58219 static inline u32 bstat(char *name)
58220 {
58221 struct stat64 stat;
58222- if (sys_stat64(name, &stat) != 0)
58223+ if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
58224 return 0;
58225 if (!S_ISBLK(stat.st_mode))
58226 return 0;
58227diff -urNp linux-3.0.4/init/do_mounts_initrd.c linux-3.0.4/init/do_mounts_initrd.c
58228--- linux-3.0.4/init/do_mounts_initrd.c 2011-07-21 22:17:23.000000000 -0400
58229+++ linux-3.0.4/init/do_mounts_initrd.c 2011-08-23 21:47:56.000000000 -0400
58230@@ -44,13 +44,13 @@ static void __init handle_initrd(void)
58231 create_dev("/dev/root.old", Root_RAM0);
58232 /* mount initrd on rootfs' /root */
58233 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
58234- sys_mkdir("/old", 0700);
58235- root_fd = sys_open("/", 0, 0);
58236- old_fd = sys_open("/old", 0, 0);
58237+ sys_mkdir((__force const char __user *)"/old", 0700);
58238+ root_fd = sys_open((__force const char __user *)"/", 0, 0);
58239+ old_fd = sys_open((__force const char __user *)"/old", 0, 0);
58240 /* move initrd over / and chdir/chroot in initrd root */
58241- sys_chdir("/root");
58242- sys_mount(".", "/", NULL, MS_MOVE, NULL);
58243- sys_chroot(".");
58244+ sys_chdir((__force const char __user *)"/root");
58245+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
58246+ sys_chroot((__force const char __user *)".");
58247
58248 /*
58249 * In case that a resume from disk is carried out by linuxrc or one of
58250@@ -67,15 +67,15 @@ static void __init handle_initrd(void)
58251
58252 /* move initrd to rootfs' /old */
58253 sys_fchdir(old_fd);
58254- sys_mount("/", ".", NULL, MS_MOVE, NULL);
58255+ sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
58256 /* switch root and cwd back to / of rootfs */
58257 sys_fchdir(root_fd);
58258- sys_chroot(".");
58259+ sys_chroot((__force const char __user *)".");
58260 sys_close(old_fd);
58261 sys_close(root_fd);
58262
58263 if (new_decode_dev(real_root_dev) == Root_RAM0) {
58264- sys_chdir("/old");
58265+ sys_chdir((__force const char __user *)"/old");
58266 return;
58267 }
58268
58269@@ -83,17 +83,17 @@ static void __init handle_initrd(void)
58270 mount_root();
58271
58272 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
58273- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
58274+ error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
58275 if (!error)
58276 printk("okay\n");
58277 else {
58278- int fd = sys_open("/dev/root.old", O_RDWR, 0);
58279+ int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
58280 if (error == -ENOENT)
58281 printk("/initrd does not exist. Ignored.\n");
58282 else
58283 printk("failed\n");
58284 printk(KERN_NOTICE "Unmounting old root\n");
58285- sys_umount("/old", MNT_DETACH);
58286+ sys_umount((__force char __user *)"/old", MNT_DETACH);
58287 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
58288 if (fd < 0) {
58289 error = fd;
58290@@ -116,11 +116,11 @@ int __init initrd_load(void)
58291 * mounted in the normal path.
58292 */
58293 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
58294- sys_unlink("/initrd.image");
58295+ sys_unlink((__force const char __user *)"/initrd.image");
58296 handle_initrd();
58297 return 1;
58298 }
58299 }
58300- sys_unlink("/initrd.image");
58301+ sys_unlink((__force const char __user *)"/initrd.image");
58302 return 0;
58303 }
58304diff -urNp linux-3.0.4/init/do_mounts_md.c linux-3.0.4/init/do_mounts_md.c
58305--- linux-3.0.4/init/do_mounts_md.c 2011-07-21 22:17:23.000000000 -0400
58306+++ linux-3.0.4/init/do_mounts_md.c 2011-08-23 21:47:56.000000000 -0400
58307@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
58308 partitioned ? "_d" : "", minor,
58309 md_setup_args[ent].device_names);
58310
58311- fd = sys_open(name, 0, 0);
58312+ fd = sys_open((__force char __user *)name, 0, 0);
58313 if (fd < 0) {
58314 printk(KERN_ERR "md: open failed - cannot start "
58315 "array %s\n", name);
58316@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
58317 * array without it
58318 */
58319 sys_close(fd);
58320- fd = sys_open(name, 0, 0);
58321+ fd = sys_open((__force char __user *)name, 0, 0);
58322 sys_ioctl(fd, BLKRRPART, 0);
58323 }
58324 sys_close(fd);
58325diff -urNp linux-3.0.4/init/initramfs.c linux-3.0.4/init/initramfs.c
58326--- linux-3.0.4/init/initramfs.c 2011-07-21 22:17:23.000000000 -0400
58327+++ linux-3.0.4/init/initramfs.c 2011-08-23 21:47:56.000000000 -0400
58328@@ -74,7 +74,7 @@ static void __init free_hash(void)
58329 }
58330 }
58331
58332-static long __init do_utime(char __user *filename, time_t mtime)
58333+static long __init do_utime(__force char __user *filename, time_t mtime)
58334 {
58335 struct timespec t[2];
58336
58337@@ -109,7 +109,7 @@ static void __init dir_utime(void)
58338 struct dir_entry *de, *tmp;
58339 list_for_each_entry_safe(de, tmp, &dir_list, list) {
58340 list_del(&de->list);
58341- do_utime(de->name, de->mtime);
58342+ do_utime((__force char __user *)de->name, de->mtime);
58343 kfree(de->name);
58344 kfree(de);
58345 }
58346@@ -271,7 +271,7 @@ static int __init maybe_link(void)
58347 if (nlink >= 2) {
58348 char *old = find_link(major, minor, ino, mode, collected);
58349 if (old)
58350- return (sys_link(old, collected) < 0) ? -1 : 1;
58351+ return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
58352 }
58353 return 0;
58354 }
58355@@ -280,11 +280,11 @@ static void __init clean_path(char *path
58356 {
58357 struct stat st;
58358
58359- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
58360+ if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
58361 if (S_ISDIR(st.st_mode))
58362- sys_rmdir(path);
58363+ sys_rmdir((__force char __user *)path);
58364 else
58365- sys_unlink(path);
58366+ sys_unlink((__force char __user *)path);
58367 }
58368 }
58369
58370@@ -305,7 +305,7 @@ static int __init do_name(void)
58371 int openflags = O_WRONLY|O_CREAT;
58372 if (ml != 1)
58373 openflags |= O_TRUNC;
58374- wfd = sys_open(collected, openflags, mode);
58375+ wfd = sys_open((__force char __user *)collected, openflags, mode);
58376
58377 if (wfd >= 0) {
58378 sys_fchown(wfd, uid, gid);
58379@@ -317,17 +317,17 @@ static int __init do_name(void)
58380 }
58381 }
58382 } else if (S_ISDIR(mode)) {
58383- sys_mkdir(collected, mode);
58384- sys_chown(collected, uid, gid);
58385- sys_chmod(collected, mode);
58386+ sys_mkdir((__force char __user *)collected, mode);
58387+ sys_chown((__force char __user *)collected, uid, gid);
58388+ sys_chmod((__force char __user *)collected, mode);
58389 dir_add(collected, mtime);
58390 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
58391 S_ISFIFO(mode) || S_ISSOCK(mode)) {
58392 if (maybe_link() == 0) {
58393- sys_mknod(collected, mode, rdev);
58394- sys_chown(collected, uid, gid);
58395- sys_chmod(collected, mode);
58396- do_utime(collected, mtime);
58397+ sys_mknod((__force char __user *)collected, mode, rdev);
58398+ sys_chown((__force char __user *)collected, uid, gid);
58399+ sys_chmod((__force char __user *)collected, mode);
58400+ do_utime((__force char __user *)collected, mtime);
58401 }
58402 }
58403 return 0;
58404@@ -336,15 +336,15 @@ static int __init do_name(void)
58405 static int __init do_copy(void)
58406 {
58407 if (count >= body_len) {
58408- sys_write(wfd, victim, body_len);
58409+ sys_write(wfd, (__force char __user *)victim, body_len);
58410 sys_close(wfd);
58411- do_utime(vcollected, mtime);
58412+ do_utime((__force char __user *)vcollected, mtime);
58413 kfree(vcollected);
58414 eat(body_len);
58415 state = SkipIt;
58416 return 0;
58417 } else {
58418- sys_write(wfd, victim, count);
58419+ sys_write(wfd, (__force char __user *)victim, count);
58420 body_len -= count;
58421 eat(count);
58422 return 1;
58423@@ -355,9 +355,9 @@ static int __init do_symlink(void)
58424 {
58425 collected[N_ALIGN(name_len) + body_len] = '\0';
58426 clean_path(collected, 0);
58427- sys_symlink(collected + N_ALIGN(name_len), collected);
58428- sys_lchown(collected, uid, gid);
58429- do_utime(collected, mtime);
58430+ sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
58431+ sys_lchown((__force char __user *)collected, uid, gid);
58432+ do_utime((__force char __user *)collected, mtime);
58433 state = SkipIt;
58434 next_state = Reset;
58435 return 0;
58436diff -urNp linux-3.0.4/init/Kconfig linux-3.0.4/init/Kconfig
58437--- linux-3.0.4/init/Kconfig 2011-07-21 22:17:23.000000000 -0400
58438+++ linux-3.0.4/init/Kconfig 2011-08-23 21:47:56.000000000 -0400
58439@@ -1195,7 +1195,7 @@ config SLUB_DEBUG
58440
58441 config COMPAT_BRK
58442 bool "Disable heap randomization"
58443- default y
58444+ default n
58445 help
58446 Randomizing heap placement makes heap exploits harder, but it
58447 also breaks ancient binaries (including anything libc5 based).
58448diff -urNp linux-3.0.4/init/main.c linux-3.0.4/init/main.c
58449--- linux-3.0.4/init/main.c 2011-07-21 22:17:23.000000000 -0400
58450+++ linux-3.0.4/init/main.c 2011-08-23 21:48:14.000000000 -0400
58451@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void)
58452 extern void tc_init(void);
58453 #endif
58454
58455+extern void grsecurity_init(void);
58456+
58457 /*
58458 * Debug helper: via this flag we know that we are in 'early bootup code'
58459 * where only the boot processor is running with IRQ disabled. This means
58460@@ -149,6 +151,49 @@ static int __init set_reset_devices(char
58461
58462 __setup("reset_devices", set_reset_devices);
58463
58464+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
58465+extern char pax_enter_kernel_user[];
58466+extern char pax_exit_kernel_user[];
58467+extern pgdval_t clone_pgd_mask;
58468+#endif
58469+
58470+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
58471+static int __init setup_pax_nouderef(char *str)
58472+{
58473+#ifdef CONFIG_X86_32
58474+ unsigned int cpu;
58475+ struct desc_struct *gdt;
58476+
58477+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
58478+ gdt = get_cpu_gdt_table(cpu);
58479+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
58480+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
58481+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
58482+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
58483+ }
58484+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
58485+#else
58486+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
58487+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
58488+ clone_pgd_mask = ~(pgdval_t)0UL;
58489+#endif
58490+
58491+ return 0;
58492+}
58493+early_param("pax_nouderef", setup_pax_nouderef);
58494+#endif
58495+
58496+#ifdef CONFIG_PAX_SOFTMODE
58497+int pax_softmode;
58498+
58499+static int __init setup_pax_softmode(char *str)
58500+{
58501+ get_option(&str, &pax_softmode);
58502+ return 1;
58503+}
58504+__setup("pax_softmode=", setup_pax_softmode);
58505+#endif
58506+
58507 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
58508 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
58509 static const char *panic_later, *panic_param;
58510@@ -667,6 +712,7 @@ int __init_or_module do_one_initcall(ini
58511 {
58512 int count = preempt_count();
58513 int ret;
58514+ const char *msg1 = "", *msg2 = "";
58515
58516 if (initcall_debug)
58517 ret = do_one_initcall_debug(fn);
58518@@ -679,15 +725,15 @@ int __init_or_module do_one_initcall(ini
58519 sprintf(msgbuf, "error code %d ", ret);
58520
58521 if (preempt_count() != count) {
58522- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
58523+ msg1 = " preemption imbalance";
58524 preempt_count() = count;
58525 }
58526 if (irqs_disabled()) {
58527- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
58528+ msg2 = " disabled interrupts";
58529 local_irq_enable();
58530 }
58531- if (msgbuf[0]) {
58532- printk("initcall %pF returned with %s\n", fn, msgbuf);
58533+ if (msgbuf[0] || *msg1 || *msg2) {
58534+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
58535 }
58536
58537 return ret;
58538@@ -805,7 +851,7 @@ static int __init kernel_init(void * unu
58539 do_basic_setup();
58540
58541 /* Open the /dev/console on the rootfs, this should never fail */
58542- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
58543+ if (sys_open((__force const char __user *) "/dev/console", O_RDWR, 0) < 0)
58544 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
58545
58546 (void) sys_dup(0);
58547@@ -818,11 +864,13 @@ static int __init kernel_init(void * unu
58548 if (!ramdisk_execute_command)
58549 ramdisk_execute_command = "/init";
58550
58551- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
58552+ if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
58553 ramdisk_execute_command = NULL;
58554 prepare_namespace();
58555 }
58556
58557+ grsecurity_init();
58558+
58559 /*
58560 * Ok, we have completed the initial bootup, and
58561 * we're essentially up and running. Get rid of the
58562diff -urNp linux-3.0.4/ipc/mqueue.c linux-3.0.4/ipc/mqueue.c
58563--- linux-3.0.4/ipc/mqueue.c 2011-07-21 22:17:23.000000000 -0400
58564+++ linux-3.0.4/ipc/mqueue.c 2011-08-23 21:48:14.000000000 -0400
58565@@ -154,6 +154,7 @@ static struct inode *mqueue_get_inode(st
58566 mq_bytes = (mq_msg_tblsz +
58567 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
58568
58569+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
58570 spin_lock(&mq_lock);
58571 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
58572 u->mq_bytes + mq_bytes >
58573diff -urNp linux-3.0.4/ipc/msg.c linux-3.0.4/ipc/msg.c
58574--- linux-3.0.4/ipc/msg.c 2011-07-21 22:17:23.000000000 -0400
58575+++ linux-3.0.4/ipc/msg.c 2011-08-23 21:47:56.000000000 -0400
58576@@ -309,18 +309,19 @@ static inline int msg_security(struct ke
58577 return security_msg_queue_associate(msq, msgflg);
58578 }
58579
58580+static struct ipc_ops msg_ops = {
58581+ .getnew = newque,
58582+ .associate = msg_security,
58583+ .more_checks = NULL
58584+};
58585+
58586 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
58587 {
58588 struct ipc_namespace *ns;
58589- struct ipc_ops msg_ops;
58590 struct ipc_params msg_params;
58591
58592 ns = current->nsproxy->ipc_ns;
58593
58594- msg_ops.getnew = newque;
58595- msg_ops.associate = msg_security;
58596- msg_ops.more_checks = NULL;
58597-
58598 msg_params.key = key;
58599 msg_params.flg = msgflg;
58600
58601diff -urNp linux-3.0.4/ipc/sem.c linux-3.0.4/ipc/sem.c
58602--- linux-3.0.4/ipc/sem.c 2011-08-23 21:44:40.000000000 -0400
58603+++ linux-3.0.4/ipc/sem.c 2011-08-23 21:48:14.000000000 -0400
58604@@ -318,10 +318,15 @@ static inline int sem_more_checks(struct
58605 return 0;
58606 }
58607
58608+static struct ipc_ops sem_ops = {
58609+ .getnew = newary,
58610+ .associate = sem_security,
58611+ .more_checks = sem_more_checks
58612+};
58613+
58614 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
58615 {
58616 struct ipc_namespace *ns;
58617- struct ipc_ops sem_ops;
58618 struct ipc_params sem_params;
58619
58620 ns = current->nsproxy->ipc_ns;
58621@@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
58622 if (nsems < 0 || nsems > ns->sc_semmsl)
58623 return -EINVAL;
58624
58625- sem_ops.getnew = newary;
58626- sem_ops.associate = sem_security;
58627- sem_ops.more_checks = sem_more_checks;
58628-
58629 sem_params.key = key;
58630 sem_params.flg = semflg;
58631 sem_params.u.nsems = nsems;
58632@@ -854,6 +855,8 @@ static int semctl_main(struct ipc_namesp
58633 int nsems;
58634 struct list_head tasks;
58635
58636+ pax_track_stack();
58637+
58638 sma = sem_lock_check(ns, semid);
58639 if (IS_ERR(sma))
58640 return PTR_ERR(sma);
58641@@ -1301,6 +1304,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
58642 struct ipc_namespace *ns;
58643 struct list_head tasks;
58644
58645+ pax_track_stack();
58646+
58647 ns = current->nsproxy->ipc_ns;
58648
58649 if (nsops < 1 || semid < 0)
58650diff -urNp linux-3.0.4/ipc/shm.c linux-3.0.4/ipc/shm.c
58651--- linux-3.0.4/ipc/shm.c 2011-07-21 22:17:23.000000000 -0400
58652+++ linux-3.0.4/ipc/shm.c 2011-08-23 21:48:14.000000000 -0400
58653@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name
58654 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
58655 #endif
58656
58657+#ifdef CONFIG_GRKERNSEC
58658+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
58659+ const time_t shm_createtime, const uid_t cuid,
58660+ const int shmid);
58661+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
58662+ const time_t shm_createtime);
58663+#endif
58664+
58665 void shm_init_ns(struct ipc_namespace *ns)
58666 {
58667 ns->shm_ctlmax = SHMMAX;
58668@@ -401,6 +409,14 @@ static int newseg(struct ipc_namespace *
58669 shp->shm_lprid = 0;
58670 shp->shm_atim = shp->shm_dtim = 0;
58671 shp->shm_ctim = get_seconds();
58672+#ifdef CONFIG_GRKERNSEC
58673+ {
58674+ struct timespec timeval;
58675+ do_posix_clock_monotonic_gettime(&timeval);
58676+
58677+ shp->shm_createtime = timeval.tv_sec;
58678+ }
58679+#endif
58680 shp->shm_segsz = size;
58681 shp->shm_nattch = 0;
58682 shp->shm_file = file;
58683@@ -451,18 +467,19 @@ static inline int shm_more_checks(struct
58684 return 0;
58685 }
58686
58687+static struct ipc_ops shm_ops = {
58688+ .getnew = newseg,
58689+ .associate = shm_security,
58690+ .more_checks = shm_more_checks
58691+};
58692+
58693 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
58694 {
58695 struct ipc_namespace *ns;
58696- struct ipc_ops shm_ops;
58697 struct ipc_params shm_params;
58698
58699 ns = current->nsproxy->ipc_ns;
58700
58701- shm_ops.getnew = newseg;
58702- shm_ops.associate = shm_security;
58703- shm_ops.more_checks = shm_more_checks;
58704-
58705 shm_params.key = key;
58706 shm_params.flg = shmflg;
58707 shm_params.u.size = size;
58708@@ -762,8 +779,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int,
58709 case SHM_LOCK:
58710 case SHM_UNLOCK:
58711 {
58712- struct file *uninitialized_var(shm_file);
58713-
58714 lru_add_drain_all(); /* drain pagevecs to lru lists */
58715
58716 shp = shm_lock_check(ns, shmid);
58717@@ -896,9 +911,21 @@ long do_shmat(int shmid, char __user *sh
58718 if (err)
58719 goto out_unlock;
58720
58721+#ifdef CONFIG_GRKERNSEC
58722+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
58723+ shp->shm_perm.cuid, shmid) ||
58724+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
58725+ err = -EACCES;
58726+ goto out_unlock;
58727+ }
58728+#endif
58729+
58730 path = shp->shm_file->f_path;
58731 path_get(&path);
58732 shp->shm_nattch++;
58733+#ifdef CONFIG_GRKERNSEC
58734+ shp->shm_lapid = current->pid;
58735+#endif
58736 size = i_size_read(path.dentry->d_inode);
58737 shm_unlock(shp);
58738
58739diff -urNp linux-3.0.4/kernel/acct.c linux-3.0.4/kernel/acct.c
58740--- linux-3.0.4/kernel/acct.c 2011-07-21 22:17:23.000000000 -0400
58741+++ linux-3.0.4/kernel/acct.c 2011-08-23 21:47:56.000000000 -0400
58742@@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_a
58743 */
58744 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
58745 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
58746- file->f_op->write(file, (char *)&ac,
58747+ file->f_op->write(file, (__force char __user *)&ac,
58748 sizeof(acct_t), &file->f_pos);
58749 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
58750 set_fs(fs);
58751diff -urNp linux-3.0.4/kernel/audit.c linux-3.0.4/kernel/audit.c
58752--- linux-3.0.4/kernel/audit.c 2011-07-21 22:17:23.000000000 -0400
58753+++ linux-3.0.4/kernel/audit.c 2011-08-23 21:47:56.000000000 -0400
58754@@ -112,7 +112,7 @@ u32 audit_sig_sid = 0;
58755 3) suppressed due to audit_rate_limit
58756 4) suppressed due to audit_backlog_limit
58757 */
58758-static atomic_t audit_lost = ATOMIC_INIT(0);
58759+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
58760
58761 /* The netlink socket. */
58762 static struct sock *audit_sock;
58763@@ -234,7 +234,7 @@ void audit_log_lost(const char *message)
58764 unsigned long now;
58765 int print;
58766
58767- atomic_inc(&audit_lost);
58768+ atomic_inc_unchecked(&audit_lost);
58769
58770 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
58771
58772@@ -253,7 +253,7 @@ void audit_log_lost(const char *message)
58773 printk(KERN_WARNING
58774 "audit: audit_lost=%d audit_rate_limit=%d "
58775 "audit_backlog_limit=%d\n",
58776- atomic_read(&audit_lost),
58777+ atomic_read_unchecked(&audit_lost),
58778 audit_rate_limit,
58779 audit_backlog_limit);
58780 audit_panic(message);
58781@@ -686,7 +686,7 @@ static int audit_receive_msg(struct sk_b
58782 status_set.pid = audit_pid;
58783 status_set.rate_limit = audit_rate_limit;
58784 status_set.backlog_limit = audit_backlog_limit;
58785- status_set.lost = atomic_read(&audit_lost);
58786+ status_set.lost = atomic_read_unchecked(&audit_lost);
58787 status_set.backlog = skb_queue_len(&audit_skb_queue);
58788 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
58789 &status_set, sizeof(status_set));
58790diff -urNp linux-3.0.4/kernel/auditsc.c linux-3.0.4/kernel/auditsc.c
58791--- linux-3.0.4/kernel/auditsc.c 2011-07-21 22:17:23.000000000 -0400
58792+++ linux-3.0.4/kernel/auditsc.c 2011-08-23 21:47:56.000000000 -0400
58793@@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_conte
58794 }
58795
58796 /* global counter which is incremented every time something logs in */
58797-static atomic_t session_id = ATOMIC_INIT(0);
58798+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
58799
58800 /**
58801 * audit_set_loginuid - set a task's audit_context loginuid
58802@@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT
58803 */
58804 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
58805 {
58806- unsigned int sessionid = atomic_inc_return(&session_id);
58807+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
58808 struct audit_context *context = task->audit_context;
58809
58810 if (context && context->in_syscall) {
58811diff -urNp linux-3.0.4/kernel/capability.c linux-3.0.4/kernel/capability.c
58812--- linux-3.0.4/kernel/capability.c 2011-07-21 22:17:23.000000000 -0400
58813+++ linux-3.0.4/kernel/capability.c 2011-08-23 21:48:14.000000000 -0400
58814@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
58815 * before modification is attempted and the application
58816 * fails.
58817 */
58818+ if (tocopy > ARRAY_SIZE(kdata))
58819+ return -EFAULT;
58820+
58821 if (copy_to_user(dataptr, kdata, tocopy
58822 * sizeof(struct __user_cap_data_struct))) {
58823 return -EFAULT;
58824@@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *n
58825 BUG();
58826 }
58827
58828- if (security_capable(ns, current_cred(), cap) == 0) {
58829+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
58830 current->flags |= PF_SUPERPRIV;
58831 return true;
58832 }
58833@@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *n
58834 }
58835 EXPORT_SYMBOL(ns_capable);
58836
58837+bool ns_capable_nolog(struct user_namespace *ns, int cap)
58838+{
58839+ if (unlikely(!cap_valid(cap))) {
58840+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
58841+ BUG();
58842+ }
58843+
58844+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
58845+ current->flags |= PF_SUPERPRIV;
58846+ return true;
58847+ }
58848+ return false;
58849+}
58850+EXPORT_SYMBOL(ns_capable_nolog);
58851+
58852+bool capable_nolog(int cap)
58853+{
58854+ return ns_capable_nolog(&init_user_ns, cap);
58855+}
58856+EXPORT_SYMBOL(capable_nolog);
58857+
58858 /**
58859 * task_ns_capable - Determine whether current task has a superior
58860 * capability targeted at a specific task's user namespace.
58861@@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct
58862 }
58863 EXPORT_SYMBOL(task_ns_capable);
58864
58865+bool task_ns_capable_nolog(struct task_struct *t, int cap)
58866+{
58867+ return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
58868+}
58869+EXPORT_SYMBOL(task_ns_capable_nolog);
58870+
58871 /**
58872 * nsown_capable - Check superior capability to one's own user_ns
58873 * @cap: The capability in question
58874diff -urNp linux-3.0.4/kernel/cgroup.c linux-3.0.4/kernel/cgroup.c
58875--- linux-3.0.4/kernel/cgroup.c 2011-07-21 22:17:23.000000000 -0400
58876+++ linux-3.0.4/kernel/cgroup.c 2011-08-23 21:48:14.000000000 -0400
58877@@ -593,6 +593,8 @@ static struct css_set *find_css_set(
58878 struct hlist_head *hhead;
58879 struct cg_cgroup_link *link;
58880
58881+ pax_track_stack();
58882+
58883 /* First see if we already have a cgroup group that matches
58884 * the desired set */
58885 read_lock(&css_set_lock);
58886diff -urNp linux-3.0.4/kernel/compat.c linux-3.0.4/kernel/compat.c
58887--- linux-3.0.4/kernel/compat.c 2011-07-21 22:17:23.000000000 -0400
58888+++ linux-3.0.4/kernel/compat.c 2011-08-23 21:48:14.000000000 -0400
58889@@ -13,6 +13,7 @@
58890
58891 #include <linux/linkage.h>
58892 #include <linux/compat.h>
58893+#include <linux/module.h>
58894 #include <linux/errno.h>
58895 #include <linux/time.h>
58896 #include <linux/signal.h>
58897diff -urNp linux-3.0.4/kernel/configs.c linux-3.0.4/kernel/configs.c
58898--- linux-3.0.4/kernel/configs.c 2011-07-21 22:17:23.000000000 -0400
58899+++ linux-3.0.4/kernel/configs.c 2011-08-23 21:48:14.000000000 -0400
58900@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
58901 struct proc_dir_entry *entry;
58902
58903 /* create the current config file */
58904+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
58905+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
58906+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
58907+ &ikconfig_file_ops);
58908+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58909+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
58910+ &ikconfig_file_ops);
58911+#endif
58912+#else
58913 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
58914 &ikconfig_file_ops);
58915+#endif
58916+
58917 if (!entry)
58918 return -ENOMEM;
58919
58920diff -urNp linux-3.0.4/kernel/cred.c linux-3.0.4/kernel/cred.c
58921--- linux-3.0.4/kernel/cred.c 2011-07-21 22:17:23.000000000 -0400
58922+++ linux-3.0.4/kernel/cred.c 2011-08-25 17:23:03.000000000 -0400
58923@@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head
58924 */
58925 void __put_cred(struct cred *cred)
58926 {
58927+ pax_track_stack();
58928+
58929 kdebug("__put_cred(%p{%d,%d})", cred,
58930 atomic_read(&cred->usage),
58931 read_cred_subscribers(cred));
58932@@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
58933 {
58934 struct cred *cred;
58935
58936+ pax_track_stack();
58937+
58938 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
58939 atomic_read(&tsk->cred->usage),
58940 read_cred_subscribers(tsk->cred));
58941@@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct
58942 {
58943 const struct cred *cred;
58944
58945+ pax_track_stack();
58946+
58947 rcu_read_lock();
58948
58949 do {
58950@@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
58951 {
58952 struct cred *new;
58953
58954+ pax_track_stack();
58955+
58956 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
58957 if (!new)
58958 return NULL;
58959@@ -287,6 +295,8 @@ struct cred *prepare_creds(void)
58960 const struct cred *old;
58961 struct cred *new;
58962
58963+ pax_track_stack();
58964+
58965 validate_process_creds();
58966
58967 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
58968@@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
58969 struct thread_group_cred *tgcred = NULL;
58970 struct cred *new;
58971
58972+ pax_track_stack();
58973+
58974 #ifdef CONFIG_KEYS
58975 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
58976 if (!tgcred)
58977@@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, un
58978 struct cred *new;
58979 int ret;
58980
58981+ pax_track_stack();
58982+
58983 if (
58984 #ifdef CONFIG_KEYS
58985 !p->cred->thread_keyring &&
58986@@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
58987 struct task_struct *task = current;
58988 const struct cred *old = task->real_cred;
58989
58990+ pax_track_stack();
58991+
58992 kdebug("commit_creds(%p{%d,%d})", new,
58993 atomic_read(&new->usage),
58994 read_cred_subscribers(new));
58995@@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
58996
58997 get_cred(new); /* we will require a ref for the subj creds too */
58998
58999+ gr_set_role_label(task, new->uid, new->gid);
59000+
59001 /* dumpability changes */
59002 if (old->euid != new->euid ||
59003 old->egid != new->egid ||
59004@@ -508,10 +526,8 @@ int commit_creds(struct cred *new)
59005 key_fsgid_changed(task);
59006
59007 /* do it
59008- * - What if a process setreuid()'s and this brings the
59009- * new uid over his NPROC rlimit? We can check this now
59010- * cheaply with the new uid cache, so if it matters
59011- * we should be checking for it. -DaveM
59012+ * RLIMIT_NPROC limits on user->processes have already been checked
59013+ * in set_user().
59014 */
59015 alter_cred_subscribers(new, 2);
59016 if (new->user != old->user)
59017@@ -551,6 +567,8 @@ EXPORT_SYMBOL(commit_creds);
59018 */
59019 void abort_creds(struct cred *new)
59020 {
59021+ pax_track_stack();
59022+
59023 kdebug("abort_creds(%p{%d,%d})", new,
59024 atomic_read(&new->usage),
59025 read_cred_subscribers(new));
59026@@ -574,6 +592,8 @@ const struct cred *override_creds(const
59027 {
59028 const struct cred *old = current->cred;
59029
59030+ pax_track_stack();
59031+
59032 kdebug("override_creds(%p{%d,%d})", new,
59033 atomic_read(&new->usage),
59034 read_cred_subscribers(new));
59035@@ -603,6 +623,8 @@ void revert_creds(const struct cred *old
59036 {
59037 const struct cred *override = current->cred;
59038
59039+ pax_track_stack();
59040+
59041 kdebug("revert_creds(%p{%d,%d})", old,
59042 atomic_read(&old->usage),
59043 read_cred_subscribers(old));
59044@@ -649,6 +671,8 @@ struct cred *prepare_kernel_cred(struct
59045 const struct cred *old;
59046 struct cred *new;
59047
59048+ pax_track_stack();
59049+
59050 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
59051 if (!new)
59052 return NULL;
59053@@ -703,6 +727,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
59054 */
59055 int set_security_override(struct cred *new, u32 secid)
59056 {
59057+ pax_track_stack();
59058+
59059 return security_kernel_act_as(new, secid);
59060 }
59061 EXPORT_SYMBOL(set_security_override);
59062@@ -722,6 +748,8 @@ int set_security_override_from_ctx(struc
59063 u32 secid;
59064 int ret;
59065
59066+ pax_track_stack();
59067+
59068 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
59069 if (ret < 0)
59070 return ret;
59071diff -urNp linux-3.0.4/kernel/debug/debug_core.c linux-3.0.4/kernel/debug/debug_core.c
59072--- linux-3.0.4/kernel/debug/debug_core.c 2011-07-21 22:17:23.000000000 -0400
59073+++ linux-3.0.4/kernel/debug/debug_core.c 2011-08-23 21:47:56.000000000 -0400
59074@@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_loc
59075 */
59076 static atomic_t masters_in_kgdb;
59077 static atomic_t slaves_in_kgdb;
59078-static atomic_t kgdb_break_tasklet_var;
59079+static atomic_unchecked_t kgdb_break_tasklet_var;
59080 atomic_t kgdb_setting_breakpoint;
59081
59082 struct task_struct *kgdb_usethread;
59083@@ -129,7 +129,7 @@ int kgdb_single_step;
59084 static pid_t kgdb_sstep_pid;
59085
59086 /* to keep track of the CPU which is doing the single stepping*/
59087-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
59088+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
59089
59090 /*
59091 * If you are debugging a problem where roundup (the collection of
59092@@ -542,7 +542,7 @@ return_normal:
59093 * kernel will only try for the value of sstep_tries before
59094 * giving up and continuing on.
59095 */
59096- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
59097+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
59098 (kgdb_info[cpu].task &&
59099 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
59100 atomic_set(&kgdb_active, -1);
59101@@ -636,8 +636,8 @@ cpu_master_loop:
59102 }
59103
59104 kgdb_restore:
59105- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
59106- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
59107+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
59108+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
59109 if (kgdb_info[sstep_cpu].task)
59110 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
59111 else
59112@@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(vo
59113 static void kgdb_tasklet_bpt(unsigned long ing)
59114 {
59115 kgdb_breakpoint();
59116- atomic_set(&kgdb_break_tasklet_var, 0);
59117+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
59118 }
59119
59120 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
59121
59122 void kgdb_schedule_breakpoint(void)
59123 {
59124- if (atomic_read(&kgdb_break_tasklet_var) ||
59125+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
59126 atomic_read(&kgdb_active) != -1 ||
59127 atomic_read(&kgdb_setting_breakpoint))
59128 return;
59129- atomic_inc(&kgdb_break_tasklet_var);
59130+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
59131 tasklet_schedule(&kgdb_tasklet_breakpoint);
59132 }
59133 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
59134diff -urNp linux-3.0.4/kernel/debug/kdb/kdb_main.c linux-3.0.4/kernel/debug/kdb/kdb_main.c
59135--- linux-3.0.4/kernel/debug/kdb/kdb_main.c 2011-07-21 22:17:23.000000000 -0400
59136+++ linux-3.0.4/kernel/debug/kdb/kdb_main.c 2011-08-23 21:47:56.000000000 -0400
59137@@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const cha
59138 list_for_each_entry(mod, kdb_modules, list) {
59139
59140 kdb_printf("%-20s%8u 0x%p ", mod->name,
59141- mod->core_size, (void *)mod);
59142+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
59143 #ifdef CONFIG_MODULE_UNLOAD
59144 kdb_printf("%4d ", module_refcount(mod));
59145 #endif
59146@@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const cha
59147 kdb_printf(" (Loading)");
59148 else
59149 kdb_printf(" (Live)");
59150- kdb_printf(" 0x%p", mod->module_core);
59151+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
59152
59153 #ifdef CONFIG_MODULE_UNLOAD
59154 {
59155diff -urNp linux-3.0.4/kernel/events/core.c linux-3.0.4/kernel/events/core.c
59156--- linux-3.0.4/kernel/events/core.c 2011-08-23 21:44:40.000000000 -0400
59157+++ linux-3.0.4/kernel/events/core.c 2011-08-23 21:47:56.000000000 -0400
59158@@ -170,7 +170,7 @@ int perf_proc_update_handler(struct ctl_
59159 return 0;
59160 }
59161
59162-static atomic64_t perf_event_id;
59163+static atomic64_unchecked_t perf_event_id;
59164
59165 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
59166 enum event_type_t event_type);
59167@@ -2488,7 +2488,7 @@ static void __perf_event_read(void *info
59168
59169 static inline u64 perf_event_count(struct perf_event *event)
59170 {
59171- return local64_read(&event->count) + atomic64_read(&event->child_count);
59172+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
59173 }
59174
59175 static u64 perf_event_read(struct perf_event *event)
59176@@ -3023,9 +3023,9 @@ u64 perf_event_read_value(struct perf_ev
59177 mutex_lock(&event->child_mutex);
59178 total += perf_event_read(event);
59179 *enabled += event->total_time_enabled +
59180- atomic64_read(&event->child_total_time_enabled);
59181+ atomic64_read_unchecked(&event->child_total_time_enabled);
59182 *running += event->total_time_running +
59183- atomic64_read(&event->child_total_time_running);
59184+ atomic64_read_unchecked(&event->child_total_time_running);
59185
59186 list_for_each_entry(child, &event->child_list, child_list) {
59187 total += perf_event_read(child);
59188@@ -3388,10 +3388,10 @@ void perf_event_update_userpage(struct p
59189 userpg->offset -= local64_read(&event->hw.prev_count);
59190
59191 userpg->time_enabled = event->total_time_enabled +
59192- atomic64_read(&event->child_total_time_enabled);
59193+ atomic64_read_unchecked(&event->child_total_time_enabled);
59194
59195 userpg->time_running = event->total_time_running +
59196- atomic64_read(&event->child_total_time_running);
59197+ atomic64_read_unchecked(&event->child_total_time_running);
59198
59199 barrier();
59200 ++userpg->lock;
59201@@ -4188,11 +4188,11 @@ static void perf_output_read_one(struct
59202 values[n++] = perf_event_count(event);
59203 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
59204 values[n++] = enabled +
59205- atomic64_read(&event->child_total_time_enabled);
59206+ atomic64_read_unchecked(&event->child_total_time_enabled);
59207 }
59208 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
59209 values[n++] = running +
59210- atomic64_read(&event->child_total_time_running);
59211+ atomic64_read_unchecked(&event->child_total_time_running);
59212 }
59213 if (read_format & PERF_FORMAT_ID)
59214 values[n++] = primary_event_id(event);
59215@@ -6190,7 +6190,7 @@ perf_event_alloc(struct perf_event_attr
59216 event->parent = parent_event;
59217
59218 event->ns = get_pid_ns(current->nsproxy->pid_ns);
59219- event->id = atomic64_inc_return(&perf_event_id);
59220+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
59221
59222 event->state = PERF_EVENT_STATE_INACTIVE;
59223
59224@@ -6713,10 +6713,10 @@ static void sync_child_event(struct perf
59225 /*
59226 * Add back the child's count to the parent's count:
59227 */
59228- atomic64_add(child_val, &parent_event->child_count);
59229- atomic64_add(child_event->total_time_enabled,
59230+ atomic64_add_unchecked(child_val, &parent_event->child_count);
59231+ atomic64_add_unchecked(child_event->total_time_enabled,
59232 &parent_event->child_total_time_enabled);
59233- atomic64_add(child_event->total_time_running,
59234+ atomic64_add_unchecked(child_event->total_time_running,
59235 &parent_event->child_total_time_running);
59236
59237 /*
59238diff -urNp linux-3.0.4/kernel/exit.c linux-3.0.4/kernel/exit.c
59239--- linux-3.0.4/kernel/exit.c 2011-07-21 22:17:23.000000000 -0400
59240+++ linux-3.0.4/kernel/exit.c 2011-08-23 21:48:14.000000000 -0400
59241@@ -57,6 +57,10 @@
59242 #include <asm/pgtable.h>
59243 #include <asm/mmu_context.h>
59244
59245+#ifdef CONFIG_GRKERNSEC
59246+extern rwlock_t grsec_exec_file_lock;
59247+#endif
59248+
59249 static void exit_mm(struct task_struct * tsk);
59250
59251 static void __unhash_process(struct task_struct *p, bool group_dead)
59252@@ -169,6 +173,10 @@ void release_task(struct task_struct * p
59253 struct task_struct *leader;
59254 int zap_leader;
59255 repeat:
59256+#ifdef CONFIG_NET
59257+ gr_del_task_from_ip_table(p);
59258+#endif
59259+
59260 tracehook_prepare_release_task(p);
59261 /* don't need to get the RCU readlock here - the process is dead and
59262 * can't be modifying its own credentials. But shut RCU-lockdep up */
59263@@ -338,11 +346,22 @@ static void reparent_to_kthreadd(void)
59264 {
59265 write_lock_irq(&tasklist_lock);
59266
59267+#ifdef CONFIG_GRKERNSEC
59268+ write_lock(&grsec_exec_file_lock);
59269+ if (current->exec_file) {
59270+ fput(current->exec_file);
59271+ current->exec_file = NULL;
59272+ }
59273+ write_unlock(&grsec_exec_file_lock);
59274+#endif
59275+
59276 ptrace_unlink(current);
59277 /* Reparent to init */
59278 current->real_parent = current->parent = kthreadd_task;
59279 list_move_tail(&current->sibling, &current->real_parent->children);
59280
59281+ gr_set_kernel_label(current);
59282+
59283 /* Set the exit signal to SIGCHLD so we signal init on exit */
59284 current->exit_signal = SIGCHLD;
59285
59286@@ -394,7 +413,7 @@ int allow_signal(int sig)
59287 * know it'll be handled, so that they don't get converted to
59288 * SIGKILL or just silently dropped.
59289 */
59290- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
59291+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
59292 recalc_sigpending();
59293 spin_unlock_irq(&current->sighand->siglock);
59294 return 0;
59295@@ -430,6 +449,17 @@ void daemonize(const char *name, ...)
59296 vsnprintf(current->comm, sizeof(current->comm), name, args);
59297 va_end(args);
59298
59299+#ifdef CONFIG_GRKERNSEC
59300+ write_lock(&grsec_exec_file_lock);
59301+ if (current->exec_file) {
59302+ fput(current->exec_file);
59303+ current->exec_file = NULL;
59304+ }
59305+ write_unlock(&grsec_exec_file_lock);
59306+#endif
59307+
59308+ gr_set_kernel_label(current);
59309+
59310 /*
59311 * If we were started as result of loading a module, close all of the
59312 * user space pages. We don't need them, and if we didn't close them
59313@@ -904,15 +934,8 @@ NORET_TYPE void do_exit(long code)
59314 struct task_struct *tsk = current;
59315 int group_dead;
59316
59317- profile_task_exit(tsk);
59318-
59319- WARN_ON(atomic_read(&tsk->fs_excl));
59320- WARN_ON(blk_needs_flush_plug(tsk));
59321-
59322 if (unlikely(in_interrupt()))
59323 panic("Aiee, killing interrupt handler!");
59324- if (unlikely(!tsk->pid))
59325- panic("Attempted to kill the idle task!");
59326
59327 /*
59328 * If do_exit is called because this processes oopsed, it's possible
59329@@ -923,6 +946,14 @@ NORET_TYPE void do_exit(long code)
59330 */
59331 set_fs(USER_DS);
59332
59333+ profile_task_exit(tsk);
59334+
59335+ WARN_ON(atomic_read(&tsk->fs_excl));
59336+ WARN_ON(blk_needs_flush_plug(tsk));
59337+
59338+ if (unlikely(!tsk->pid))
59339+ panic("Attempted to kill the idle task!");
59340+
59341 tracehook_report_exit(&code);
59342
59343 validate_creds_for_do_exit(tsk);
59344@@ -983,6 +1014,9 @@ NORET_TYPE void do_exit(long code)
59345 tsk->exit_code = code;
59346 taskstats_exit(tsk, group_dead);
59347
59348+ gr_acl_handle_psacct(tsk, code);
59349+ gr_acl_handle_exit();
59350+
59351 exit_mm(tsk);
59352
59353 if (group_dead)
59354diff -urNp linux-3.0.4/kernel/fork.c linux-3.0.4/kernel/fork.c
59355--- linux-3.0.4/kernel/fork.c 2011-07-21 22:17:23.000000000 -0400
59356+++ linux-3.0.4/kernel/fork.c 2011-08-25 17:23:36.000000000 -0400
59357@@ -286,7 +286,7 @@ static struct task_struct *dup_task_stru
59358 *stackend = STACK_END_MAGIC; /* for overflow detection */
59359
59360 #ifdef CONFIG_CC_STACKPROTECTOR
59361- tsk->stack_canary = get_random_int();
59362+ tsk->stack_canary = pax_get_random_long();
59363 #endif
59364
59365 /* One for us, one for whoever does the "release_task()" (usually parent) */
59366@@ -308,13 +308,77 @@ out:
59367 }
59368
59369 #ifdef CONFIG_MMU
59370+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
59371+{
59372+ struct vm_area_struct *tmp;
59373+ unsigned long charge;
59374+ struct mempolicy *pol;
59375+ struct file *file;
59376+
59377+ charge = 0;
59378+ if (mpnt->vm_flags & VM_ACCOUNT) {
59379+ unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
59380+ if (security_vm_enough_memory(len))
59381+ goto fail_nomem;
59382+ charge = len;
59383+ }
59384+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
59385+ if (!tmp)
59386+ goto fail_nomem;
59387+ *tmp = *mpnt;
59388+ tmp->vm_mm = mm;
59389+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
59390+ pol = mpol_dup(vma_policy(mpnt));
59391+ if (IS_ERR(pol))
59392+ goto fail_nomem_policy;
59393+ vma_set_policy(tmp, pol);
59394+ if (anon_vma_fork(tmp, mpnt))
59395+ goto fail_nomem_anon_vma_fork;
59396+ tmp->vm_flags &= ~VM_LOCKED;
59397+ tmp->vm_next = tmp->vm_prev = NULL;
59398+ tmp->vm_mirror = NULL;
59399+ file = tmp->vm_file;
59400+ if (file) {
59401+ struct inode *inode = file->f_path.dentry->d_inode;
59402+ struct address_space *mapping = file->f_mapping;
59403+
59404+ get_file(file);
59405+ if (tmp->vm_flags & VM_DENYWRITE)
59406+ atomic_dec(&inode->i_writecount);
59407+ mutex_lock(&mapping->i_mmap_mutex);
59408+ if (tmp->vm_flags & VM_SHARED)
59409+ mapping->i_mmap_writable++;
59410+ flush_dcache_mmap_lock(mapping);
59411+ /* insert tmp into the share list, just after mpnt */
59412+ vma_prio_tree_add(tmp, mpnt);
59413+ flush_dcache_mmap_unlock(mapping);
59414+ mutex_unlock(&mapping->i_mmap_mutex);
59415+ }
59416+
59417+ /*
59418+ * Clear hugetlb-related page reserves for children. This only
59419+ * affects MAP_PRIVATE mappings. Faults generated by the child
59420+ * are not guaranteed to succeed, even if read-only
59421+ */
59422+ if (is_vm_hugetlb_page(tmp))
59423+ reset_vma_resv_huge_pages(tmp);
59424+
59425+ return tmp;
59426+
59427+fail_nomem_anon_vma_fork:
59428+ mpol_put(pol);
59429+fail_nomem_policy:
59430+ kmem_cache_free(vm_area_cachep, tmp);
59431+fail_nomem:
59432+ vm_unacct_memory(charge);
59433+ return NULL;
59434+}
59435+
59436 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
59437 {
59438 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
59439 struct rb_node **rb_link, *rb_parent;
59440 int retval;
59441- unsigned long charge;
59442- struct mempolicy *pol;
59443
59444 down_write(&oldmm->mmap_sem);
59445 flush_cache_dup_mm(oldmm);
59446@@ -326,8 +390,8 @@ static int dup_mmap(struct mm_struct *mm
59447 mm->locked_vm = 0;
59448 mm->mmap = NULL;
59449 mm->mmap_cache = NULL;
59450- mm->free_area_cache = oldmm->mmap_base;
59451- mm->cached_hole_size = ~0UL;
59452+ mm->free_area_cache = oldmm->free_area_cache;
59453+ mm->cached_hole_size = oldmm->cached_hole_size;
59454 mm->map_count = 0;
59455 cpumask_clear(mm_cpumask(mm));
59456 mm->mm_rb = RB_ROOT;
59457@@ -343,8 +407,6 @@ static int dup_mmap(struct mm_struct *mm
59458
59459 prev = NULL;
59460 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
59461- struct file *file;
59462-
59463 if (mpnt->vm_flags & VM_DONTCOPY) {
59464 long pages = vma_pages(mpnt);
59465 mm->total_vm -= pages;
59466@@ -352,55 +414,13 @@ static int dup_mmap(struct mm_struct *mm
59467 -pages);
59468 continue;
59469 }
59470- charge = 0;
59471- if (mpnt->vm_flags & VM_ACCOUNT) {
59472- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
59473- if (security_vm_enough_memory(len))
59474- goto fail_nomem;
59475- charge = len;
59476- }
59477- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
59478- if (!tmp)
59479- goto fail_nomem;
59480- *tmp = *mpnt;
59481- INIT_LIST_HEAD(&tmp->anon_vma_chain);
59482- pol = mpol_dup(vma_policy(mpnt));
59483- retval = PTR_ERR(pol);
59484- if (IS_ERR(pol))
59485- goto fail_nomem_policy;
59486- vma_set_policy(tmp, pol);
59487- tmp->vm_mm = mm;
59488- if (anon_vma_fork(tmp, mpnt))
59489- goto fail_nomem_anon_vma_fork;
59490- tmp->vm_flags &= ~VM_LOCKED;
59491- tmp->vm_next = tmp->vm_prev = NULL;
59492- file = tmp->vm_file;
59493- if (file) {
59494- struct inode *inode = file->f_path.dentry->d_inode;
59495- struct address_space *mapping = file->f_mapping;
59496-
59497- get_file(file);
59498- if (tmp->vm_flags & VM_DENYWRITE)
59499- atomic_dec(&inode->i_writecount);
59500- mutex_lock(&mapping->i_mmap_mutex);
59501- if (tmp->vm_flags & VM_SHARED)
59502- mapping->i_mmap_writable++;
59503- flush_dcache_mmap_lock(mapping);
59504- /* insert tmp into the share list, just after mpnt */
59505- vma_prio_tree_add(tmp, mpnt);
59506- flush_dcache_mmap_unlock(mapping);
59507- mutex_unlock(&mapping->i_mmap_mutex);
59508+ tmp = dup_vma(mm, mpnt);
59509+ if (!tmp) {
59510+ retval = -ENOMEM;
59511+ goto out;
59512 }
59513
59514 /*
59515- * Clear hugetlb-related page reserves for children. This only
59516- * affects MAP_PRIVATE mappings. Faults generated by the child
59517- * are not guaranteed to succeed, even if read-only
59518- */
59519- if (is_vm_hugetlb_page(tmp))
59520- reset_vma_resv_huge_pages(tmp);
59521-
59522- /*
59523 * Link in the new vma and copy the page table entries.
59524 */
59525 *pprev = tmp;
59526@@ -421,6 +441,31 @@ static int dup_mmap(struct mm_struct *mm
59527 if (retval)
59528 goto out;
59529 }
59530+
59531+#ifdef CONFIG_PAX_SEGMEXEC
59532+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
59533+ struct vm_area_struct *mpnt_m;
59534+
59535+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
59536+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
59537+
59538+ if (!mpnt->vm_mirror)
59539+ continue;
59540+
59541+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
59542+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
59543+ mpnt->vm_mirror = mpnt_m;
59544+ } else {
59545+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
59546+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
59547+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
59548+ mpnt->vm_mirror->vm_mirror = mpnt;
59549+ }
59550+ }
59551+ BUG_ON(mpnt_m);
59552+ }
59553+#endif
59554+
59555 /* a new mm has just been created */
59556 arch_dup_mmap(oldmm, mm);
59557 retval = 0;
59558@@ -429,14 +474,6 @@ out:
59559 flush_tlb_mm(oldmm);
59560 up_write(&oldmm->mmap_sem);
59561 return retval;
59562-fail_nomem_anon_vma_fork:
59563- mpol_put(pol);
59564-fail_nomem_policy:
59565- kmem_cache_free(vm_area_cachep, tmp);
59566-fail_nomem:
59567- retval = -ENOMEM;
59568- vm_unacct_memory(charge);
59569- goto out;
59570 }
59571
59572 static inline int mm_alloc_pgd(struct mm_struct * mm)
59573@@ -836,13 +873,14 @@ static int copy_fs(unsigned long clone_f
59574 spin_unlock(&fs->lock);
59575 return -EAGAIN;
59576 }
59577- fs->users++;
59578+ atomic_inc(&fs->users);
59579 spin_unlock(&fs->lock);
59580 return 0;
59581 }
59582 tsk->fs = copy_fs_struct(fs);
59583 if (!tsk->fs)
59584 return -ENOMEM;
59585+ gr_set_chroot_entries(tsk, &tsk->fs->root);
59586 return 0;
59587 }
59588
59589@@ -1104,12 +1142,16 @@ static struct task_struct *copy_process(
59590 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
59591 #endif
59592 retval = -EAGAIN;
59593+
59594+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
59595+
59596 if (atomic_read(&p->real_cred->user->processes) >=
59597 task_rlimit(p, RLIMIT_NPROC)) {
59598- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
59599- p->real_cred->user != INIT_USER)
59600+ if (p->real_cred->user != INIT_USER &&
59601+ !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
59602 goto bad_fork_free;
59603 }
59604+ current->flags &= ~PF_NPROC_EXCEEDED;
59605
59606 retval = copy_creds(p, clone_flags);
59607 if (retval < 0)
59608@@ -1250,6 +1292,8 @@ static struct task_struct *copy_process(
59609 if (clone_flags & CLONE_THREAD)
59610 p->tgid = current->tgid;
59611
59612+ gr_copy_label(p);
59613+
59614 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
59615 /*
59616 * Clear TID on mm_release()?
59617@@ -1414,6 +1458,8 @@ bad_fork_cleanup_count:
59618 bad_fork_free:
59619 free_task(p);
59620 fork_out:
59621+ gr_log_forkfail(retval);
59622+
59623 return ERR_PTR(retval);
59624 }
59625
59626@@ -1502,6 +1548,8 @@ long do_fork(unsigned long clone_flags,
59627 if (clone_flags & CLONE_PARENT_SETTID)
59628 put_user(nr, parent_tidptr);
59629
59630+ gr_handle_brute_check();
59631+
59632 if (clone_flags & CLONE_VFORK) {
59633 p->vfork_done = &vfork;
59634 init_completion(&vfork);
59635@@ -1610,7 +1658,7 @@ static int unshare_fs(unsigned long unsh
59636 return 0;
59637
59638 /* don't need lock here; in the worst case we'll do useless copy */
59639- if (fs->users == 1)
59640+ if (atomic_read(&fs->users) == 1)
59641 return 0;
59642
59643 *new_fsp = copy_fs_struct(fs);
59644@@ -1697,7 +1745,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
59645 fs = current->fs;
59646 spin_lock(&fs->lock);
59647 current->fs = new_fs;
59648- if (--fs->users)
59649+ gr_set_chroot_entries(current, &current->fs->root);
59650+ if (atomic_dec_return(&fs->users))
59651 new_fs = NULL;
59652 else
59653 new_fs = fs;
59654diff -urNp linux-3.0.4/kernel/futex.c linux-3.0.4/kernel/futex.c
59655--- linux-3.0.4/kernel/futex.c 2011-08-23 21:44:40.000000000 -0400
59656+++ linux-3.0.4/kernel/futex.c 2011-08-23 21:48:14.000000000 -0400
59657@@ -54,6 +54,7 @@
59658 #include <linux/mount.h>
59659 #include <linux/pagemap.h>
59660 #include <linux/syscalls.h>
59661+#include <linux/ptrace.h>
59662 #include <linux/signal.h>
59663 #include <linux/module.h>
59664 #include <linux/magic.h>
59665@@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fsh
59666 struct page *page, *page_head;
59667 int err, ro = 0;
59668
59669+#ifdef CONFIG_PAX_SEGMEXEC
59670+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
59671+ return -EFAULT;
59672+#endif
59673+
59674 /*
59675 * The futex address must be "naturally" aligned.
59676 */
59677@@ -1863,6 +1869,8 @@ static int futex_wait(u32 __user *uaddr,
59678 struct futex_q q = futex_q_init;
59679 int ret;
59680
59681+ pax_track_stack();
59682+
59683 if (!bitset)
59684 return -EINVAL;
59685 q.bitset = bitset;
59686@@ -2259,6 +2267,8 @@ static int futex_wait_requeue_pi(u32 __u
59687 struct futex_q q = futex_q_init;
59688 int res, ret;
59689
59690+ pax_track_stack();
59691+
59692 if (!bitset)
59693 return -EINVAL;
59694
59695@@ -2431,7 +2441,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
59696 {
59697 struct robust_list_head __user *head;
59698 unsigned long ret;
59699+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
59700 const struct cred *cred = current_cred(), *pcred;
59701+#endif
59702
59703 if (!futex_cmpxchg_enabled)
59704 return -ENOSYS;
59705@@ -2447,6 +2459,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
59706 if (!p)
59707 goto err_unlock;
59708 ret = -EPERM;
59709+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59710+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
59711+ goto err_unlock;
59712+#else
59713 pcred = __task_cred(p);
59714 /* If victim is in different user_ns, then uids are not
59715 comparable, so we must have CAP_SYS_PTRACE */
59716@@ -2461,6 +2477,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
59717 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
59718 goto err_unlock;
59719 ok:
59720+#endif
59721 head = p->robust_list;
59722 rcu_read_unlock();
59723 }
59724@@ -2712,6 +2729,7 @@ static int __init futex_init(void)
59725 {
59726 u32 curval;
59727 int i;
59728+ mm_segment_t oldfs;
59729
59730 /*
59731 * This will fail and we want it. Some arch implementations do
59732@@ -2723,8 +2741,11 @@ static int __init futex_init(void)
59733 * implementation, the non-functional ones will return
59734 * -ENOSYS.
59735 */
59736+ oldfs = get_fs();
59737+ set_fs(USER_DS);
59738 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
59739 futex_cmpxchg_enabled = 1;
59740+ set_fs(oldfs);
59741
59742 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
59743 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
59744diff -urNp linux-3.0.4/kernel/futex_compat.c linux-3.0.4/kernel/futex_compat.c
59745--- linux-3.0.4/kernel/futex_compat.c 2011-07-21 22:17:23.000000000 -0400
59746+++ linux-3.0.4/kernel/futex_compat.c 2011-08-23 21:48:14.000000000 -0400
59747@@ -10,6 +10,7 @@
59748 #include <linux/compat.h>
59749 #include <linux/nsproxy.h>
59750 #include <linux/futex.h>
59751+#include <linux/ptrace.h>
59752
59753 #include <asm/uaccess.h>
59754
59755@@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, comp
59756 {
59757 struct compat_robust_list_head __user *head;
59758 unsigned long ret;
59759- const struct cred *cred = current_cred(), *pcred;
59760+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
59761+ const struct cred *cred = current_cred();
59762+ const struct cred *pcred;
59763+#endif
59764
59765 if (!futex_cmpxchg_enabled)
59766 return -ENOSYS;
59767@@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, comp
59768 if (!p)
59769 goto err_unlock;
59770 ret = -EPERM;
59771+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59772+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
59773+ goto err_unlock;
59774+#else
59775 pcred = __task_cred(p);
59776 /* If victim is in different user_ns, then uids are not
59777 comparable, so we must have CAP_SYS_PTRACE */
59778@@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, comp
59779 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
59780 goto err_unlock;
59781 ok:
59782+#endif
59783 head = p->compat_robust_list;
59784 rcu_read_unlock();
59785 }
59786diff -urNp linux-3.0.4/kernel/gcov/base.c linux-3.0.4/kernel/gcov/base.c
59787--- linux-3.0.4/kernel/gcov/base.c 2011-07-21 22:17:23.000000000 -0400
59788+++ linux-3.0.4/kernel/gcov/base.c 2011-08-23 21:47:56.000000000 -0400
59789@@ -102,11 +102,6 @@ void gcov_enable_events(void)
59790 }
59791
59792 #ifdef CONFIG_MODULES
59793-static inline int within(void *addr, void *start, unsigned long size)
59794-{
59795- return ((addr >= start) && (addr < start + size));
59796-}
59797-
59798 /* Update list and generate events when modules are unloaded. */
59799 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
59800 void *data)
59801@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
59802 prev = NULL;
59803 /* Remove entries located in module from linked list. */
59804 for (info = gcov_info_head; info; info = info->next) {
59805- if (within(info, mod->module_core, mod->core_size)) {
59806+ if (within_module_core_rw((unsigned long)info, mod)) {
59807 if (prev)
59808 prev->next = info->next;
59809 else
59810diff -urNp linux-3.0.4/kernel/hrtimer.c linux-3.0.4/kernel/hrtimer.c
59811--- linux-3.0.4/kernel/hrtimer.c 2011-07-21 22:17:23.000000000 -0400
59812+++ linux-3.0.4/kernel/hrtimer.c 2011-08-23 21:47:56.000000000 -0400
59813@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
59814 local_irq_restore(flags);
59815 }
59816
59817-static void run_hrtimer_softirq(struct softirq_action *h)
59818+static void run_hrtimer_softirq(void)
59819 {
59820 hrtimer_peek_ahead_timers();
59821 }
59822diff -urNp linux-3.0.4/kernel/jump_label.c linux-3.0.4/kernel/jump_label.c
59823--- linux-3.0.4/kernel/jump_label.c 2011-07-21 22:17:23.000000000 -0400
59824+++ linux-3.0.4/kernel/jump_label.c 2011-08-23 21:47:56.000000000 -0400
59825@@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entr
59826
59827 size = (((unsigned long)stop - (unsigned long)start)
59828 / sizeof(struct jump_entry));
59829+ pax_open_kernel();
59830 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
59831+ pax_close_kernel();
59832 }
59833
59834 static void jump_label_update(struct jump_label_key *key, int enable);
59835@@ -297,10 +299,12 @@ static void jump_label_invalidate_module
59836 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
59837 struct jump_entry *iter;
59838
59839+ pax_open_kernel();
59840 for (iter = iter_start; iter < iter_stop; iter++) {
59841 if (within_module_init(iter->code, mod))
59842 iter->code = 0;
59843 }
59844+ pax_close_kernel();
59845 }
59846
59847 static int
59848diff -urNp linux-3.0.4/kernel/kallsyms.c linux-3.0.4/kernel/kallsyms.c
59849--- linux-3.0.4/kernel/kallsyms.c 2011-07-21 22:17:23.000000000 -0400
59850+++ linux-3.0.4/kernel/kallsyms.c 2011-08-23 21:48:14.000000000 -0400
59851@@ -11,6 +11,9 @@
59852 * Changed the compression method from stem compression to "table lookup"
59853 * compression (see scripts/kallsyms.c for a more complete description)
59854 */
59855+#ifdef CONFIG_GRKERNSEC_HIDESYM
59856+#define __INCLUDED_BY_HIDESYM 1
59857+#endif
59858 #include <linux/kallsyms.h>
59859 #include <linux/module.h>
59860 #include <linux/init.h>
59861@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark
59862
59863 static inline int is_kernel_inittext(unsigned long addr)
59864 {
59865+ if (system_state != SYSTEM_BOOTING)
59866+ return 0;
59867+
59868 if (addr >= (unsigned long)_sinittext
59869 && addr <= (unsigned long)_einittext)
59870 return 1;
59871 return 0;
59872 }
59873
59874+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59875+#ifdef CONFIG_MODULES
59876+static inline int is_module_text(unsigned long addr)
59877+{
59878+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
59879+ return 1;
59880+
59881+ addr = ktla_ktva(addr);
59882+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
59883+}
59884+#else
59885+static inline int is_module_text(unsigned long addr)
59886+{
59887+ return 0;
59888+}
59889+#endif
59890+#endif
59891+
59892 static inline int is_kernel_text(unsigned long addr)
59893 {
59894 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
59895@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne
59896
59897 static inline int is_kernel(unsigned long addr)
59898 {
59899+
59900+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59901+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
59902+ return 1;
59903+
59904+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
59905+#else
59906 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
59907+#endif
59908+
59909 return 1;
59910 return in_gate_area_no_mm(addr);
59911 }
59912
59913 static int is_ksym_addr(unsigned long addr)
59914 {
59915+
59916+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59917+ if (is_module_text(addr))
59918+ return 0;
59919+#endif
59920+
59921 if (all_var)
59922 return is_kernel(addr);
59923
59924@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(st
59925
59926 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
59927 {
59928- iter->name[0] = '\0';
59929 iter->nameoff = get_symbol_offset(new_pos);
59930 iter->pos = new_pos;
59931 }
59932@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, vo
59933 {
59934 struct kallsym_iter *iter = m->private;
59935
59936+#ifdef CONFIG_GRKERNSEC_HIDESYM
59937+ if (current_uid())
59938+ return 0;
59939+#endif
59940+
59941 /* Some debugging symbols have no name. Ignore them. */
59942 if (!iter->name[0])
59943 return 0;
59944@@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *i
59945 struct kallsym_iter *iter;
59946 int ret;
59947
59948- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
59949+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
59950 if (!iter)
59951 return -ENOMEM;
59952 reset_iter(iter, 0);
59953diff -urNp linux-3.0.4/kernel/kmod.c linux-3.0.4/kernel/kmod.c
59954--- linux-3.0.4/kernel/kmod.c 2011-07-21 22:17:23.000000000 -0400
59955+++ linux-3.0.4/kernel/kmod.c 2011-08-23 21:48:14.000000000 -0400
59956@@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
59957 * If module auto-loading support is disabled then this function
59958 * becomes a no-operation.
59959 */
59960-int __request_module(bool wait, const char *fmt, ...)
59961+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
59962 {
59963- va_list args;
59964 char module_name[MODULE_NAME_LEN];
59965 unsigned int max_modprobes;
59966 int ret;
59967- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
59968+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
59969 static char *envp[] = { "HOME=/",
59970 "TERM=linux",
59971 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
59972@@ -88,9 +87,7 @@ int __request_module(bool wait, const ch
59973 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
59974 static int kmod_loop_msg;
59975
59976- va_start(args, fmt);
59977- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
59978- va_end(args);
59979+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
59980 if (ret >= MODULE_NAME_LEN)
59981 return -ENAMETOOLONG;
59982
59983@@ -98,6 +95,20 @@ int __request_module(bool wait, const ch
59984 if (ret)
59985 return ret;
59986
59987+#ifdef CONFIG_GRKERNSEC_MODHARDEN
59988+ if (!current_uid()) {
59989+ /* hack to workaround consolekit/udisks stupidity */
59990+ read_lock(&tasklist_lock);
59991+ if (!strcmp(current->comm, "mount") &&
59992+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
59993+ read_unlock(&tasklist_lock);
59994+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
59995+ return -EPERM;
59996+ }
59997+ read_unlock(&tasklist_lock);
59998+ }
59999+#endif
60000+
60001 /* If modprobe needs a service that is in a module, we get a recursive
60002 * loop. Limit the number of running kmod threads to max_threads/2 or
60003 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
60004@@ -131,6 +142,47 @@ int __request_module(bool wait, const ch
60005 atomic_dec(&kmod_concurrent);
60006 return ret;
60007 }
60008+
60009+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
60010+{
60011+ va_list args;
60012+ int ret;
60013+
60014+ va_start(args, fmt);
60015+ ret = ____request_module(wait, module_param, fmt, args);
60016+ va_end(args);
60017+
60018+ return ret;
60019+}
60020+
60021+int __request_module(bool wait, const char *fmt, ...)
60022+{
60023+ va_list args;
60024+ int ret;
60025+
60026+#ifdef CONFIG_GRKERNSEC_MODHARDEN
60027+ if (current_uid()) {
60028+ char module_param[MODULE_NAME_LEN];
60029+
60030+ memset(module_param, 0, sizeof(module_param));
60031+
60032+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
60033+
60034+ va_start(args, fmt);
60035+ ret = ____request_module(wait, module_param, fmt, args);
60036+ va_end(args);
60037+
60038+ return ret;
60039+ }
60040+#endif
60041+
60042+ va_start(args, fmt);
60043+ ret = ____request_module(wait, NULL, fmt, args);
60044+ va_end(args);
60045+
60046+ return ret;
60047+}
60048+
60049 EXPORT_SYMBOL(__request_module);
60050 #endif /* CONFIG_MODULES */
60051
60052diff -urNp linux-3.0.4/kernel/kprobes.c linux-3.0.4/kernel/kprobes.c
60053--- linux-3.0.4/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
60054+++ linux-3.0.4/kernel/kprobes.c 2011-08-23 21:47:56.000000000 -0400
60055@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_
60056 * kernel image and loaded module images reside. This is required
60057 * so x86_64 can correctly handle the %rip-relative fixups.
60058 */
60059- kip->insns = module_alloc(PAGE_SIZE);
60060+ kip->insns = module_alloc_exec(PAGE_SIZE);
60061 if (!kip->insns) {
60062 kfree(kip);
60063 return NULL;
60064@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(st
60065 */
60066 if (!list_is_singular(&kip->list)) {
60067 list_del(&kip->list);
60068- module_free(NULL, kip->insns);
60069+ module_free_exec(NULL, kip->insns);
60070 kfree(kip);
60071 }
60072 return 1;
60073@@ -1936,7 +1936,7 @@ static int __init init_kprobes(void)
60074 {
60075 int i, err = 0;
60076 unsigned long offset = 0, size = 0;
60077- char *modname, namebuf[128];
60078+ char *modname, namebuf[KSYM_NAME_LEN];
60079 const char *symbol_name;
60080 void *addr;
60081 struct kprobe_blackpoint *kb;
60082@@ -2062,7 +2062,7 @@ static int __kprobes show_kprobe_addr(st
60083 const char *sym = NULL;
60084 unsigned int i = *(loff_t *) v;
60085 unsigned long offset = 0;
60086- char *modname, namebuf[128];
60087+ char *modname, namebuf[KSYM_NAME_LEN];
60088
60089 head = &kprobe_table[i];
60090 preempt_disable();
60091diff -urNp linux-3.0.4/kernel/lockdep.c linux-3.0.4/kernel/lockdep.c
60092--- linux-3.0.4/kernel/lockdep.c 2011-07-21 22:17:23.000000000 -0400
60093+++ linux-3.0.4/kernel/lockdep.c 2011-08-23 21:47:56.000000000 -0400
60094@@ -583,6 +583,10 @@ static int static_obj(void *obj)
60095 end = (unsigned long) &_end,
60096 addr = (unsigned long) obj;
60097
60098+#ifdef CONFIG_PAX_KERNEXEC
60099+ start = ktla_ktva(start);
60100+#endif
60101+
60102 /*
60103 * static variable?
60104 */
60105@@ -718,6 +722,7 @@ register_lock_class(struct lockdep_map *
60106 if (!static_obj(lock->key)) {
60107 debug_locks_off();
60108 printk("INFO: trying to register non-static key.\n");
60109+ printk("lock:%pS key:%pS.\n", lock, lock->key);
60110 printk("the code is fine but needs lockdep annotation.\n");
60111 printk("turning off the locking correctness validator.\n");
60112 dump_stack();
60113@@ -2936,7 +2941,7 @@ static int __lock_acquire(struct lockdep
60114 if (!class)
60115 return 0;
60116 }
60117- atomic_inc((atomic_t *)&class->ops);
60118+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
60119 if (very_verbose(class)) {
60120 printk("\nacquire class [%p] %s", class->key, class->name);
60121 if (class->name_version > 1)
60122diff -urNp linux-3.0.4/kernel/lockdep_proc.c linux-3.0.4/kernel/lockdep_proc.c
60123--- linux-3.0.4/kernel/lockdep_proc.c 2011-07-21 22:17:23.000000000 -0400
60124+++ linux-3.0.4/kernel/lockdep_proc.c 2011-08-23 21:47:56.000000000 -0400
60125@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
60126
60127 static void print_name(struct seq_file *m, struct lock_class *class)
60128 {
60129- char str[128];
60130+ char str[KSYM_NAME_LEN];
60131 const char *name = class->name;
60132
60133 if (!name) {
60134diff -urNp linux-3.0.4/kernel/module.c linux-3.0.4/kernel/module.c
60135--- linux-3.0.4/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
60136+++ linux-3.0.4/kernel/module.c 2011-08-23 21:48:14.000000000 -0400
60137@@ -58,6 +58,7 @@
60138 #include <linux/jump_label.h>
60139 #include <linux/pfn.h>
60140 #include <linux/bsearch.h>
60141+#include <linux/grsecurity.h>
60142
60143 #define CREATE_TRACE_POINTS
60144 #include <trace/events/module.h>
60145@@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
60146
60147 /* Bounds of module allocation, for speeding __module_address.
60148 * Protected by module_mutex. */
60149-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
60150+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
60151+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
60152
60153 int register_module_notifier(struct notifier_block * nb)
60154 {
60155@@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(cons
60156 return true;
60157
60158 list_for_each_entry_rcu(mod, &modules, list) {
60159- struct symsearch arr[] = {
60160+ struct symsearch modarr[] = {
60161 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
60162 NOT_GPL_ONLY, false },
60163 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
60164@@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(cons
60165 #endif
60166 };
60167
60168- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
60169+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
60170 return true;
60171 }
60172 return false;
60173@@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(
60174 static int percpu_modalloc(struct module *mod,
60175 unsigned long size, unsigned long align)
60176 {
60177- if (align > PAGE_SIZE) {
60178+ if (align-1 >= PAGE_SIZE) {
60179 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
60180 mod->name, align, PAGE_SIZE);
60181 align = PAGE_SIZE;
60182@@ -1166,7 +1168,7 @@ resolve_symbol_wait(struct module *mod,
60183 */
60184 #ifdef CONFIG_SYSFS
60185
60186-#ifdef CONFIG_KALLSYMS
60187+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
60188 static inline bool sect_empty(const Elf_Shdr *sect)
60189 {
60190 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
60191@@ -1632,21 +1634,21 @@ static void set_section_ro_nx(void *base
60192
60193 static void unset_module_core_ro_nx(struct module *mod)
60194 {
60195- set_page_attributes(mod->module_core + mod->core_text_size,
60196- mod->module_core + mod->core_size,
60197+ set_page_attributes(mod->module_core_rw,
60198+ mod->module_core_rw + mod->core_size_rw,
60199 set_memory_x);
60200- set_page_attributes(mod->module_core,
60201- mod->module_core + mod->core_ro_size,
60202+ set_page_attributes(mod->module_core_rx,
60203+ mod->module_core_rx + mod->core_size_rx,
60204 set_memory_rw);
60205 }
60206
60207 static void unset_module_init_ro_nx(struct module *mod)
60208 {
60209- set_page_attributes(mod->module_init + mod->init_text_size,
60210- mod->module_init + mod->init_size,
60211+ set_page_attributes(mod->module_init_rw,
60212+ mod->module_init_rw + mod->init_size_rw,
60213 set_memory_x);
60214- set_page_attributes(mod->module_init,
60215- mod->module_init + mod->init_ro_size,
60216+ set_page_attributes(mod->module_init_rx,
60217+ mod->module_init_rx + mod->init_size_rx,
60218 set_memory_rw);
60219 }
60220
60221@@ -1657,14 +1659,14 @@ void set_all_modules_text_rw(void)
60222
60223 mutex_lock(&module_mutex);
60224 list_for_each_entry_rcu(mod, &modules, list) {
60225- if ((mod->module_core) && (mod->core_text_size)) {
60226- set_page_attributes(mod->module_core,
60227- mod->module_core + mod->core_text_size,
60228+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
60229+ set_page_attributes(mod->module_core_rx,
60230+ mod->module_core_rx + mod->core_size_rx,
60231 set_memory_rw);
60232 }
60233- if ((mod->module_init) && (mod->init_text_size)) {
60234- set_page_attributes(mod->module_init,
60235- mod->module_init + mod->init_text_size,
60236+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
60237+ set_page_attributes(mod->module_init_rx,
60238+ mod->module_init_rx + mod->init_size_rx,
60239 set_memory_rw);
60240 }
60241 }
60242@@ -1678,14 +1680,14 @@ void set_all_modules_text_ro(void)
60243
60244 mutex_lock(&module_mutex);
60245 list_for_each_entry_rcu(mod, &modules, list) {
60246- if ((mod->module_core) && (mod->core_text_size)) {
60247- set_page_attributes(mod->module_core,
60248- mod->module_core + mod->core_text_size,
60249+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
60250+ set_page_attributes(mod->module_core_rx,
60251+ mod->module_core_rx + mod->core_size_rx,
60252 set_memory_ro);
60253 }
60254- if ((mod->module_init) && (mod->init_text_size)) {
60255- set_page_attributes(mod->module_init,
60256- mod->module_init + mod->init_text_size,
60257+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
60258+ set_page_attributes(mod->module_init_rx,
60259+ mod->module_init_rx + mod->init_size_rx,
60260 set_memory_ro);
60261 }
60262 }
60263@@ -1722,16 +1724,19 @@ static void free_module(struct module *m
60264
60265 /* This may be NULL, but that's OK */
60266 unset_module_init_ro_nx(mod);
60267- module_free(mod, mod->module_init);
60268+ module_free(mod, mod->module_init_rw);
60269+ module_free_exec(mod, mod->module_init_rx);
60270 kfree(mod->args);
60271 percpu_modfree(mod);
60272
60273 /* Free lock-classes: */
60274- lockdep_free_key_range(mod->module_core, mod->core_size);
60275+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
60276+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
60277
60278 /* Finally, free the core (containing the module structure) */
60279 unset_module_core_ro_nx(mod);
60280- module_free(mod, mod->module_core);
60281+ module_free_exec(mod, mod->module_core_rx);
60282+ module_free(mod, mod->module_core_rw);
60283
60284 #ifdef CONFIG_MPU
60285 update_protections(current->mm);
60286@@ -1800,10 +1805,31 @@ static int simplify_symbols(struct modul
60287 unsigned int i;
60288 int ret = 0;
60289 const struct kernel_symbol *ksym;
60290+#ifdef CONFIG_GRKERNSEC_MODHARDEN
60291+ int is_fs_load = 0;
60292+ int register_filesystem_found = 0;
60293+ char *p;
60294+
60295+ p = strstr(mod->args, "grsec_modharden_fs");
60296+ if (p) {
60297+ char *endptr = p + strlen("grsec_modharden_fs");
60298+ /* copy \0 as well */
60299+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
60300+ is_fs_load = 1;
60301+ }
60302+#endif
60303
60304 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
60305 const char *name = info->strtab + sym[i].st_name;
60306
60307+#ifdef CONFIG_GRKERNSEC_MODHARDEN
60308+ /* it's a real shame this will never get ripped and copied
60309+ upstream! ;(
60310+ */
60311+ if (is_fs_load && !strcmp(name, "register_filesystem"))
60312+ register_filesystem_found = 1;
60313+#endif
60314+
60315 switch (sym[i].st_shndx) {
60316 case SHN_COMMON:
60317 /* We compiled with -fno-common. These are not
60318@@ -1824,7 +1850,9 @@ static int simplify_symbols(struct modul
60319 ksym = resolve_symbol_wait(mod, info, name);
60320 /* Ok if resolved. */
60321 if (ksym && !IS_ERR(ksym)) {
60322+ pax_open_kernel();
60323 sym[i].st_value = ksym->value;
60324+ pax_close_kernel();
60325 break;
60326 }
60327
60328@@ -1843,11 +1871,20 @@ static int simplify_symbols(struct modul
60329 secbase = (unsigned long)mod_percpu(mod);
60330 else
60331 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
60332+ pax_open_kernel();
60333 sym[i].st_value += secbase;
60334+ pax_close_kernel();
60335 break;
60336 }
60337 }
60338
60339+#ifdef CONFIG_GRKERNSEC_MODHARDEN
60340+ if (is_fs_load && !register_filesystem_found) {
60341+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
60342+ ret = -EPERM;
60343+ }
60344+#endif
60345+
60346 return ret;
60347 }
60348
60349@@ -1931,22 +1968,12 @@ static void layout_sections(struct modul
60350 || s->sh_entsize != ~0UL
60351 || strstarts(sname, ".init"))
60352 continue;
60353- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
60354+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
60355+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
60356+ else
60357+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
60358 DEBUGP("\t%s\n", name);
60359 }
60360- switch (m) {
60361- case 0: /* executable */
60362- mod->core_size = debug_align(mod->core_size);
60363- mod->core_text_size = mod->core_size;
60364- break;
60365- case 1: /* RO: text and ro-data */
60366- mod->core_size = debug_align(mod->core_size);
60367- mod->core_ro_size = mod->core_size;
60368- break;
60369- case 3: /* whole core */
60370- mod->core_size = debug_align(mod->core_size);
60371- break;
60372- }
60373 }
60374
60375 DEBUGP("Init section allocation order:\n");
60376@@ -1960,23 +1987,13 @@ static void layout_sections(struct modul
60377 || s->sh_entsize != ~0UL
60378 || !strstarts(sname, ".init"))
60379 continue;
60380- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
60381- | INIT_OFFSET_MASK);
60382+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
60383+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
60384+ else
60385+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
60386+ s->sh_entsize |= INIT_OFFSET_MASK;
60387 DEBUGP("\t%s\n", sname);
60388 }
60389- switch (m) {
60390- case 0: /* executable */
60391- mod->init_size = debug_align(mod->init_size);
60392- mod->init_text_size = mod->init_size;
60393- break;
60394- case 1: /* RO: text and ro-data */
60395- mod->init_size = debug_align(mod->init_size);
60396- mod->init_ro_size = mod->init_size;
60397- break;
60398- case 3: /* whole init */
60399- mod->init_size = debug_align(mod->init_size);
60400- break;
60401- }
60402 }
60403 }
60404
60405@@ -2141,7 +2158,7 @@ static void layout_symtab(struct module
60406
60407 /* Put symbol section at end of init part of module. */
60408 symsect->sh_flags |= SHF_ALLOC;
60409- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
60410+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
60411 info->index.sym) | INIT_OFFSET_MASK;
60412 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
60413
60414@@ -2158,19 +2175,19 @@ static void layout_symtab(struct module
60415 }
60416
60417 /* Append room for core symbols at end of core part. */
60418- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
60419- mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
60420+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
60421+ mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
60422
60423 /* Put string table section at end of init part of module. */
60424 strsect->sh_flags |= SHF_ALLOC;
60425- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
60426+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
60427 info->index.str) | INIT_OFFSET_MASK;
60428 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
60429
60430 /* Append room for core symbols' strings at end of core part. */
60431- info->stroffs = mod->core_size;
60432+ info->stroffs = mod->core_size_rx;
60433 __set_bit(0, info->strmap);
60434- mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
60435+ mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
60436 }
60437
60438 static void add_kallsyms(struct module *mod, const struct load_info *info)
60439@@ -2186,11 +2203,13 @@ static void add_kallsyms(struct module *
60440 /* Make sure we get permanent strtab: don't use info->strtab. */
60441 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
60442
60443+ pax_open_kernel();
60444+
60445 /* Set types up while we still have access to sections. */
60446 for (i = 0; i < mod->num_symtab; i++)
60447 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
60448
60449- mod->core_symtab = dst = mod->module_core + info->symoffs;
60450+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
60451 src = mod->symtab;
60452 *dst = *src;
60453 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
60454@@ -2203,10 +2222,12 @@ static void add_kallsyms(struct module *
60455 }
60456 mod->core_num_syms = ndst;
60457
60458- mod->core_strtab = s = mod->module_core + info->stroffs;
60459+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
60460 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
60461 if (test_bit(i, info->strmap))
60462 *++s = mod->strtab[i];
60463+
60464+ pax_close_kernel();
60465 }
60466 #else
60467 static inline void layout_symtab(struct module *mod, struct load_info *info)
60468@@ -2235,17 +2256,33 @@ static void dynamic_debug_remove(struct
60469 ddebug_remove_module(debug->modname);
60470 }
60471
60472-static void *module_alloc_update_bounds(unsigned long size)
60473+static void *module_alloc_update_bounds_rw(unsigned long size)
60474 {
60475 void *ret = module_alloc(size);
60476
60477 if (ret) {
60478 mutex_lock(&module_mutex);
60479 /* Update module bounds. */
60480- if ((unsigned long)ret < module_addr_min)
60481- module_addr_min = (unsigned long)ret;
60482- if ((unsigned long)ret + size > module_addr_max)
60483- module_addr_max = (unsigned long)ret + size;
60484+ if ((unsigned long)ret < module_addr_min_rw)
60485+ module_addr_min_rw = (unsigned long)ret;
60486+ if ((unsigned long)ret + size > module_addr_max_rw)
60487+ module_addr_max_rw = (unsigned long)ret + size;
60488+ mutex_unlock(&module_mutex);
60489+ }
60490+ return ret;
60491+}
60492+
60493+static void *module_alloc_update_bounds_rx(unsigned long size)
60494+{
60495+ void *ret = module_alloc_exec(size);
60496+
60497+ if (ret) {
60498+ mutex_lock(&module_mutex);
60499+ /* Update module bounds. */
60500+ if ((unsigned long)ret < module_addr_min_rx)
60501+ module_addr_min_rx = (unsigned long)ret;
60502+ if ((unsigned long)ret + size > module_addr_max_rx)
60503+ module_addr_max_rx = (unsigned long)ret + size;
60504 mutex_unlock(&module_mutex);
60505 }
60506 return ret;
60507@@ -2538,7 +2575,7 @@ static int move_module(struct module *mo
60508 void *ptr;
60509
60510 /* Do the allocs. */
60511- ptr = module_alloc_update_bounds(mod->core_size);
60512+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
60513 /*
60514 * The pointer to this block is stored in the module structure
60515 * which is inside the block. Just mark it as not being a
60516@@ -2548,23 +2585,50 @@ static int move_module(struct module *mo
60517 if (!ptr)
60518 return -ENOMEM;
60519
60520- memset(ptr, 0, mod->core_size);
60521- mod->module_core = ptr;
60522+ memset(ptr, 0, mod->core_size_rw);
60523+ mod->module_core_rw = ptr;
60524
60525- ptr = module_alloc_update_bounds(mod->init_size);
60526+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
60527 /*
60528 * The pointer to this block is stored in the module structure
60529 * which is inside the block. This block doesn't need to be
60530 * scanned as it contains data and code that will be freed
60531 * after the module is initialized.
60532 */
60533- kmemleak_ignore(ptr);
60534- if (!ptr && mod->init_size) {
60535- module_free(mod, mod->module_core);
60536+ kmemleak_not_leak(ptr);
60537+ if (!ptr && mod->init_size_rw) {
60538+ module_free(mod, mod->module_core_rw);
60539 return -ENOMEM;
60540 }
60541- memset(ptr, 0, mod->init_size);
60542- mod->module_init = ptr;
60543+ memset(ptr, 0, mod->init_size_rw);
60544+ mod->module_init_rw = ptr;
60545+
60546+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
60547+ kmemleak_not_leak(ptr);
60548+ if (!ptr) {
60549+ module_free(mod, mod->module_init_rw);
60550+ module_free(mod, mod->module_core_rw);
60551+ return -ENOMEM;
60552+ }
60553+
60554+ pax_open_kernel();
60555+ memset(ptr, 0, mod->core_size_rx);
60556+ pax_close_kernel();
60557+ mod->module_core_rx = ptr;
60558+
60559+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
60560+ kmemleak_not_leak(ptr);
60561+ if (!ptr && mod->init_size_rx) {
60562+ module_free_exec(mod, mod->module_core_rx);
60563+ module_free(mod, mod->module_init_rw);
60564+ module_free(mod, mod->module_core_rw);
60565+ return -ENOMEM;
60566+ }
60567+
60568+ pax_open_kernel();
60569+ memset(ptr, 0, mod->init_size_rx);
60570+ pax_close_kernel();
60571+ mod->module_init_rx = ptr;
60572
60573 /* Transfer each section which specifies SHF_ALLOC */
60574 DEBUGP("final section addresses:\n");
60575@@ -2575,16 +2639,45 @@ static int move_module(struct module *mo
60576 if (!(shdr->sh_flags & SHF_ALLOC))
60577 continue;
60578
60579- if (shdr->sh_entsize & INIT_OFFSET_MASK)
60580- dest = mod->module_init
60581- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
60582- else
60583- dest = mod->module_core + shdr->sh_entsize;
60584+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
60585+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
60586+ dest = mod->module_init_rw
60587+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
60588+ else
60589+ dest = mod->module_init_rx
60590+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
60591+ } else {
60592+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
60593+ dest = mod->module_core_rw + shdr->sh_entsize;
60594+ else
60595+ dest = mod->module_core_rx + shdr->sh_entsize;
60596+ }
60597+
60598+ if (shdr->sh_type != SHT_NOBITS) {
60599+
60600+#ifdef CONFIG_PAX_KERNEXEC
60601+#ifdef CONFIG_X86_64
60602+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
60603+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
60604+#endif
60605+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
60606+ pax_open_kernel();
60607+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
60608+ pax_close_kernel();
60609+ } else
60610+#endif
60611
60612- if (shdr->sh_type != SHT_NOBITS)
60613 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
60614+ }
60615 /* Update sh_addr to point to copy in image. */
60616- shdr->sh_addr = (unsigned long)dest;
60617+
60618+#ifdef CONFIG_PAX_KERNEXEC
60619+ if (shdr->sh_flags & SHF_EXECINSTR)
60620+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
60621+ else
60622+#endif
60623+
60624+ shdr->sh_addr = (unsigned long)dest;
60625 DEBUGP("\t0x%lx %s\n",
60626 shdr->sh_addr, info->secstrings + shdr->sh_name);
60627 }
60628@@ -2635,12 +2728,12 @@ static void flush_module_icache(const st
60629 * Do it before processing of module parameters, so the module
60630 * can provide parameter accessor functions of its own.
60631 */
60632- if (mod->module_init)
60633- flush_icache_range((unsigned long)mod->module_init,
60634- (unsigned long)mod->module_init
60635- + mod->init_size);
60636- flush_icache_range((unsigned long)mod->module_core,
60637- (unsigned long)mod->module_core + mod->core_size);
60638+ if (mod->module_init_rx)
60639+ flush_icache_range((unsigned long)mod->module_init_rx,
60640+ (unsigned long)mod->module_init_rx
60641+ + mod->init_size_rx);
60642+ flush_icache_range((unsigned long)mod->module_core_rx,
60643+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
60644
60645 set_fs(old_fs);
60646 }
60647@@ -2712,8 +2805,10 @@ static void module_deallocate(struct mod
60648 {
60649 kfree(info->strmap);
60650 percpu_modfree(mod);
60651- module_free(mod, mod->module_init);
60652- module_free(mod, mod->module_core);
60653+ module_free_exec(mod, mod->module_init_rx);
60654+ module_free_exec(mod, mod->module_core_rx);
60655+ module_free(mod, mod->module_init_rw);
60656+ module_free(mod, mod->module_core_rw);
60657 }
60658
60659 static int post_relocation(struct module *mod, const struct load_info *info)
60660@@ -2770,9 +2865,38 @@ static struct module *load_module(void _
60661 if (err)
60662 goto free_unload;
60663
60664+ /* Now copy in args */
60665+ mod->args = strndup_user(uargs, ~0UL >> 1);
60666+ if (IS_ERR(mod->args)) {
60667+ err = PTR_ERR(mod->args);
60668+ goto free_unload;
60669+ }
60670+
60671 /* Set up MODINFO_ATTR fields */
60672 setup_modinfo(mod, &info);
60673
60674+#ifdef CONFIG_GRKERNSEC_MODHARDEN
60675+ {
60676+ char *p, *p2;
60677+
60678+ if (strstr(mod->args, "grsec_modharden_netdev")) {
60679+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
60680+ err = -EPERM;
60681+ goto free_modinfo;
60682+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
60683+ p += strlen("grsec_modharden_normal");
60684+ p2 = strstr(p, "_");
60685+ if (p2) {
60686+ *p2 = '\0';
60687+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
60688+ *p2 = '_';
60689+ }
60690+ err = -EPERM;
60691+ goto free_modinfo;
60692+ }
60693+ }
60694+#endif
60695+
60696 /* Fix up syms, so that st_value is a pointer to location. */
60697 err = simplify_symbols(mod, &info);
60698 if (err < 0)
60699@@ -2788,13 +2912,6 @@ static struct module *load_module(void _
60700
60701 flush_module_icache(mod);
60702
60703- /* Now copy in args */
60704- mod->args = strndup_user(uargs, ~0UL >> 1);
60705- if (IS_ERR(mod->args)) {
60706- err = PTR_ERR(mod->args);
60707- goto free_arch_cleanup;
60708- }
60709-
60710 /* Mark state as coming so strong_try_module_get() ignores us. */
60711 mod->state = MODULE_STATE_COMING;
60712
60713@@ -2854,11 +2971,10 @@ static struct module *load_module(void _
60714 unlock:
60715 mutex_unlock(&module_mutex);
60716 synchronize_sched();
60717- kfree(mod->args);
60718- free_arch_cleanup:
60719 module_arch_cleanup(mod);
60720 free_modinfo:
60721 free_modinfo(mod);
60722+ kfree(mod->args);
60723 free_unload:
60724 module_unload_free(mod);
60725 free_module:
60726@@ -2899,16 +3015,16 @@ SYSCALL_DEFINE3(init_module, void __user
60727 MODULE_STATE_COMING, mod);
60728
60729 /* Set RO and NX regions for core */
60730- set_section_ro_nx(mod->module_core,
60731- mod->core_text_size,
60732- mod->core_ro_size,
60733- mod->core_size);
60734+ set_section_ro_nx(mod->module_core_rx,
60735+ mod->core_size_rx,
60736+ mod->core_size_rx,
60737+ mod->core_size_rx);
60738
60739 /* Set RO and NX regions for init */
60740- set_section_ro_nx(mod->module_init,
60741- mod->init_text_size,
60742- mod->init_ro_size,
60743- mod->init_size);
60744+ set_section_ro_nx(mod->module_init_rx,
60745+ mod->init_size_rx,
60746+ mod->init_size_rx,
60747+ mod->init_size_rx);
60748
60749 do_mod_ctors(mod);
60750 /* Start the module */
60751@@ -2954,11 +3070,12 @@ SYSCALL_DEFINE3(init_module, void __user
60752 mod->strtab = mod->core_strtab;
60753 #endif
60754 unset_module_init_ro_nx(mod);
60755- module_free(mod, mod->module_init);
60756- mod->module_init = NULL;
60757- mod->init_size = 0;
60758- mod->init_ro_size = 0;
60759- mod->init_text_size = 0;
60760+ module_free(mod, mod->module_init_rw);
60761+ module_free_exec(mod, mod->module_init_rx);
60762+ mod->module_init_rw = NULL;
60763+ mod->module_init_rx = NULL;
60764+ mod->init_size_rw = 0;
60765+ mod->init_size_rx = 0;
60766 mutex_unlock(&module_mutex);
60767
60768 return 0;
60769@@ -2989,10 +3106,16 @@ static const char *get_ksymbol(struct mo
60770 unsigned long nextval;
60771
60772 /* At worse, next value is at end of module */
60773- if (within_module_init(addr, mod))
60774- nextval = (unsigned long)mod->module_init+mod->init_text_size;
60775+ if (within_module_init_rx(addr, mod))
60776+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
60777+ else if (within_module_init_rw(addr, mod))
60778+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
60779+ else if (within_module_core_rx(addr, mod))
60780+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
60781+ else if (within_module_core_rw(addr, mod))
60782+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
60783 else
60784- nextval = (unsigned long)mod->module_core+mod->core_text_size;
60785+ return NULL;
60786
60787 /* Scan for closest preceding symbol, and next symbol. (ELF
60788 starts real symbols at 1). */
60789@@ -3238,7 +3361,7 @@ static int m_show(struct seq_file *m, vo
60790 char buf[8];
60791
60792 seq_printf(m, "%s %u",
60793- mod->name, mod->init_size + mod->core_size);
60794+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
60795 print_unload_info(m, mod);
60796
60797 /* Informative for users. */
60798@@ -3247,7 +3370,7 @@ static int m_show(struct seq_file *m, vo
60799 mod->state == MODULE_STATE_COMING ? "Loading":
60800 "Live");
60801 /* Used by oprofile and other similar tools. */
60802- seq_printf(m, " 0x%pK", mod->module_core);
60803+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
60804
60805 /* Taints info */
60806 if (mod->taints)
60807@@ -3283,7 +3406,17 @@ static const struct file_operations proc
60808
60809 static int __init proc_modules_init(void)
60810 {
60811+#ifndef CONFIG_GRKERNSEC_HIDESYM
60812+#ifdef CONFIG_GRKERNSEC_PROC_USER
60813+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
60814+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60815+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
60816+#else
60817 proc_create("modules", 0, NULL, &proc_modules_operations);
60818+#endif
60819+#else
60820+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
60821+#endif
60822 return 0;
60823 }
60824 module_init(proc_modules_init);
60825@@ -3342,12 +3475,12 @@ struct module *__module_address(unsigned
60826 {
60827 struct module *mod;
60828
60829- if (addr < module_addr_min || addr > module_addr_max)
60830+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
60831+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
60832 return NULL;
60833
60834 list_for_each_entry_rcu(mod, &modules, list)
60835- if (within_module_core(addr, mod)
60836- || within_module_init(addr, mod))
60837+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
60838 return mod;
60839 return NULL;
60840 }
60841@@ -3381,11 +3514,20 @@ bool is_module_text_address(unsigned lon
60842 */
60843 struct module *__module_text_address(unsigned long addr)
60844 {
60845- struct module *mod = __module_address(addr);
60846+ struct module *mod;
60847+
60848+#ifdef CONFIG_X86_32
60849+ addr = ktla_ktva(addr);
60850+#endif
60851+
60852+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
60853+ return NULL;
60854+
60855+ mod = __module_address(addr);
60856+
60857 if (mod) {
60858 /* Make sure it's within the text section. */
60859- if (!within(addr, mod->module_init, mod->init_text_size)
60860- && !within(addr, mod->module_core, mod->core_text_size))
60861+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
60862 mod = NULL;
60863 }
60864 return mod;
60865diff -urNp linux-3.0.4/kernel/mutex.c linux-3.0.4/kernel/mutex.c
60866--- linux-3.0.4/kernel/mutex.c 2011-07-21 22:17:23.000000000 -0400
60867+++ linux-3.0.4/kernel/mutex.c 2011-08-23 21:47:56.000000000 -0400
60868@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock,
60869 spin_lock_mutex(&lock->wait_lock, flags);
60870
60871 debug_mutex_lock_common(lock, &waiter);
60872- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
60873+ debug_mutex_add_waiter(lock, &waiter, task);
60874
60875 /* add waiting tasks to the end of the waitqueue (FIFO): */
60876 list_add_tail(&waiter.list, &lock->wait_list);
60877@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock,
60878 * TASK_UNINTERRUPTIBLE case.)
60879 */
60880 if (unlikely(signal_pending_state(state, task))) {
60881- mutex_remove_waiter(lock, &waiter,
60882- task_thread_info(task));
60883+ mutex_remove_waiter(lock, &waiter, task);
60884 mutex_release(&lock->dep_map, 1, ip);
60885 spin_unlock_mutex(&lock->wait_lock, flags);
60886
60887@@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock,
60888 done:
60889 lock_acquired(&lock->dep_map, ip);
60890 /* got the lock - rejoice! */
60891- mutex_remove_waiter(lock, &waiter, current_thread_info());
60892+ mutex_remove_waiter(lock, &waiter, task);
60893 mutex_set_owner(lock);
60894
60895 /* set it to 0 if there are no waiters left: */
60896diff -urNp linux-3.0.4/kernel/mutex-debug.c linux-3.0.4/kernel/mutex-debug.c
60897--- linux-3.0.4/kernel/mutex-debug.c 2011-07-21 22:17:23.000000000 -0400
60898+++ linux-3.0.4/kernel/mutex-debug.c 2011-08-23 21:47:56.000000000 -0400
60899@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
60900 }
60901
60902 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
60903- struct thread_info *ti)
60904+ struct task_struct *task)
60905 {
60906 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
60907
60908 /* Mark the current thread as blocked on the lock: */
60909- ti->task->blocked_on = waiter;
60910+ task->blocked_on = waiter;
60911 }
60912
60913 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
60914- struct thread_info *ti)
60915+ struct task_struct *task)
60916 {
60917 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
60918- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
60919- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
60920- ti->task->blocked_on = NULL;
60921+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
60922+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
60923+ task->blocked_on = NULL;
60924
60925 list_del_init(&waiter->list);
60926 waiter->task = NULL;
60927diff -urNp linux-3.0.4/kernel/mutex-debug.h linux-3.0.4/kernel/mutex-debug.h
60928--- linux-3.0.4/kernel/mutex-debug.h 2011-07-21 22:17:23.000000000 -0400
60929+++ linux-3.0.4/kernel/mutex-debug.h 2011-08-23 21:47:56.000000000 -0400
60930@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(stru
60931 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
60932 extern void debug_mutex_add_waiter(struct mutex *lock,
60933 struct mutex_waiter *waiter,
60934- struct thread_info *ti);
60935+ struct task_struct *task);
60936 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
60937- struct thread_info *ti);
60938+ struct task_struct *task);
60939 extern void debug_mutex_unlock(struct mutex *lock);
60940 extern void debug_mutex_init(struct mutex *lock, const char *name,
60941 struct lock_class_key *key);
60942diff -urNp linux-3.0.4/kernel/padata.c linux-3.0.4/kernel/padata.c
60943--- linux-3.0.4/kernel/padata.c 2011-07-21 22:17:23.000000000 -0400
60944+++ linux-3.0.4/kernel/padata.c 2011-08-23 21:47:56.000000000 -0400
60945@@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_ins
60946 padata->pd = pd;
60947 padata->cb_cpu = cb_cpu;
60948
60949- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
60950- atomic_set(&pd->seq_nr, -1);
60951+ if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
60952+ atomic_set_unchecked(&pd->seq_nr, -1);
60953
60954- padata->seq_nr = atomic_inc_return(&pd->seq_nr);
60955+ padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
60956
60957 target_cpu = padata_cpu_hash(padata);
60958 queue = per_cpu_ptr(pd->pqueue, target_cpu);
60959@@ -444,7 +444,7 @@ static struct parallel_data *padata_allo
60960 padata_init_pqueues(pd);
60961 padata_init_squeues(pd);
60962 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
60963- atomic_set(&pd->seq_nr, -1);
60964+ atomic_set_unchecked(&pd->seq_nr, -1);
60965 atomic_set(&pd->reorder_objects, 0);
60966 atomic_set(&pd->refcnt, 0);
60967 pd->pinst = pinst;
60968diff -urNp linux-3.0.4/kernel/panic.c linux-3.0.4/kernel/panic.c
60969--- linux-3.0.4/kernel/panic.c 2011-07-21 22:17:23.000000000 -0400
60970+++ linux-3.0.4/kernel/panic.c 2011-08-23 21:48:14.000000000 -0400
60971@@ -369,7 +369,7 @@ static void warn_slowpath_common(const c
60972 const char *board;
60973
60974 printk(KERN_WARNING "------------[ cut here ]------------\n");
60975- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
60976+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
60977 board = dmi_get_system_info(DMI_PRODUCT_NAME);
60978 if (board)
60979 printk(KERN_WARNING "Hardware name: %s\n", board);
60980@@ -424,7 +424,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
60981 */
60982 void __stack_chk_fail(void)
60983 {
60984- panic("stack-protector: Kernel stack is corrupted in: %p\n",
60985+ dump_stack();
60986+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
60987 __builtin_return_address(0));
60988 }
60989 EXPORT_SYMBOL(__stack_chk_fail);
60990diff -urNp linux-3.0.4/kernel/pid.c linux-3.0.4/kernel/pid.c
60991--- linux-3.0.4/kernel/pid.c 2011-07-21 22:17:23.000000000 -0400
60992+++ linux-3.0.4/kernel/pid.c 2011-08-23 21:48:14.000000000 -0400
60993@@ -33,6 +33,7 @@
60994 #include <linux/rculist.h>
60995 #include <linux/bootmem.h>
60996 #include <linux/hash.h>
60997+#include <linux/security.h>
60998 #include <linux/pid_namespace.h>
60999 #include <linux/init_task.h>
61000 #include <linux/syscalls.h>
61001@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
61002
61003 int pid_max = PID_MAX_DEFAULT;
61004
61005-#define RESERVED_PIDS 300
61006+#define RESERVED_PIDS 500
61007
61008 int pid_max_min = RESERVED_PIDS + 1;
61009 int pid_max_max = PID_MAX_LIMIT;
61010@@ -419,8 +420,15 @@ EXPORT_SYMBOL(pid_task);
61011 */
61012 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
61013 {
61014+ struct task_struct *task;
61015+
61016 rcu_lockdep_assert(rcu_read_lock_held());
61017- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
61018+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
61019+
61020+ if (gr_pid_is_chrooted(task))
61021+ return NULL;
61022+
61023+ return task;
61024 }
61025
61026 struct task_struct *find_task_by_vpid(pid_t vnr)
61027@@ -428,6 +436,12 @@ struct task_struct *find_task_by_vpid(pi
61028 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
61029 }
61030
61031+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
61032+{
61033+ rcu_lockdep_assert(rcu_read_lock_held());
61034+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
61035+}
61036+
61037 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
61038 {
61039 struct pid *pid;
61040diff -urNp linux-3.0.4/kernel/posix-cpu-timers.c linux-3.0.4/kernel/posix-cpu-timers.c
61041--- linux-3.0.4/kernel/posix-cpu-timers.c 2011-07-21 22:17:23.000000000 -0400
61042+++ linux-3.0.4/kernel/posix-cpu-timers.c 2011-08-23 21:48:14.000000000 -0400
61043@@ -6,6 +6,7 @@
61044 #include <linux/posix-timers.h>
61045 #include <linux/errno.h>
61046 #include <linux/math64.h>
61047+#include <linux/security.h>
61048 #include <asm/uaccess.h>
61049 #include <linux/kernel_stat.h>
61050 #include <trace/events/timer.h>
61051@@ -1604,14 +1605,14 @@ struct k_clock clock_posix_cpu = {
61052
61053 static __init int init_posix_cpu_timers(void)
61054 {
61055- struct k_clock process = {
61056+ static struct k_clock process = {
61057 .clock_getres = process_cpu_clock_getres,
61058 .clock_get = process_cpu_clock_get,
61059 .timer_create = process_cpu_timer_create,
61060 .nsleep = process_cpu_nsleep,
61061 .nsleep_restart = process_cpu_nsleep_restart,
61062 };
61063- struct k_clock thread = {
61064+ static struct k_clock thread = {
61065 .clock_getres = thread_cpu_clock_getres,
61066 .clock_get = thread_cpu_clock_get,
61067 .timer_create = thread_cpu_timer_create,
61068diff -urNp linux-3.0.4/kernel/posix-timers.c linux-3.0.4/kernel/posix-timers.c
61069--- linux-3.0.4/kernel/posix-timers.c 2011-07-21 22:17:23.000000000 -0400
61070+++ linux-3.0.4/kernel/posix-timers.c 2011-08-23 21:48:14.000000000 -0400
61071@@ -43,6 +43,7 @@
61072 #include <linux/idr.h>
61073 #include <linux/posix-clock.h>
61074 #include <linux/posix-timers.h>
61075+#include <linux/grsecurity.h>
61076 #include <linux/syscalls.h>
61077 #include <linux/wait.h>
61078 #include <linux/workqueue.h>
61079@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
61080 * which we beg off on and pass to do_sys_settimeofday().
61081 */
61082
61083-static struct k_clock posix_clocks[MAX_CLOCKS];
61084+static struct k_clock *posix_clocks[MAX_CLOCKS];
61085
61086 /*
61087 * These ones are defined below.
61088@@ -227,7 +228,7 @@ static int posix_get_boottime(const cloc
61089 */
61090 static __init int init_posix_timers(void)
61091 {
61092- struct k_clock clock_realtime = {
61093+ static struct k_clock clock_realtime = {
61094 .clock_getres = hrtimer_get_res,
61095 .clock_get = posix_clock_realtime_get,
61096 .clock_set = posix_clock_realtime_set,
61097@@ -239,7 +240,7 @@ static __init int init_posix_timers(void
61098 .timer_get = common_timer_get,
61099 .timer_del = common_timer_del,
61100 };
61101- struct k_clock clock_monotonic = {
61102+ static struct k_clock clock_monotonic = {
61103 .clock_getres = hrtimer_get_res,
61104 .clock_get = posix_ktime_get_ts,
61105 .nsleep = common_nsleep,
61106@@ -249,19 +250,19 @@ static __init int init_posix_timers(void
61107 .timer_get = common_timer_get,
61108 .timer_del = common_timer_del,
61109 };
61110- struct k_clock clock_monotonic_raw = {
61111+ static struct k_clock clock_monotonic_raw = {
61112 .clock_getres = hrtimer_get_res,
61113 .clock_get = posix_get_monotonic_raw,
61114 };
61115- struct k_clock clock_realtime_coarse = {
61116+ static struct k_clock clock_realtime_coarse = {
61117 .clock_getres = posix_get_coarse_res,
61118 .clock_get = posix_get_realtime_coarse,
61119 };
61120- struct k_clock clock_monotonic_coarse = {
61121+ static struct k_clock clock_monotonic_coarse = {
61122 .clock_getres = posix_get_coarse_res,
61123 .clock_get = posix_get_monotonic_coarse,
61124 };
61125- struct k_clock clock_boottime = {
61126+ static struct k_clock clock_boottime = {
61127 .clock_getres = hrtimer_get_res,
61128 .clock_get = posix_get_boottime,
61129 .nsleep = common_nsleep,
61130@@ -272,6 +273,8 @@ static __init int init_posix_timers(void
61131 .timer_del = common_timer_del,
61132 };
61133
61134+ pax_track_stack();
61135+
61136 posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
61137 posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
61138 posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
61139@@ -473,7 +476,7 @@ void posix_timers_register_clock(const c
61140 return;
61141 }
61142
61143- posix_clocks[clock_id] = *new_clock;
61144+ posix_clocks[clock_id] = new_clock;
61145 }
61146 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
61147
61148@@ -519,9 +522,9 @@ static struct k_clock *clockid_to_kclock
61149 return (id & CLOCKFD_MASK) == CLOCKFD ?
61150 &clock_posix_dynamic : &clock_posix_cpu;
61151
61152- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
61153+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
61154 return NULL;
61155- return &posix_clocks[id];
61156+ return posix_clocks[id];
61157 }
61158
61159 static int common_timer_create(struct k_itimer *new_timer)
61160@@ -959,6 +962,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
61161 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
61162 return -EFAULT;
61163
61164+ /* only the CLOCK_REALTIME clock can be set, all other clocks
61165+ have their clock_set fptr set to a nosettime dummy function
61166+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
61167+ call common_clock_set, which calls do_sys_settimeofday, which
61168+ we hook
61169+ */
61170+
61171 return kc->clock_set(which_clock, &new_tp);
61172 }
61173
61174diff -urNp linux-3.0.4/kernel/power/poweroff.c linux-3.0.4/kernel/power/poweroff.c
61175--- linux-3.0.4/kernel/power/poweroff.c 2011-07-21 22:17:23.000000000 -0400
61176+++ linux-3.0.4/kernel/power/poweroff.c 2011-08-23 21:47:56.000000000 -0400
61177@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
61178 .enable_mask = SYSRQ_ENABLE_BOOT,
61179 };
61180
61181-static int pm_sysrq_init(void)
61182+static int __init pm_sysrq_init(void)
61183 {
61184 register_sysrq_key('o', &sysrq_poweroff_op);
61185 return 0;
61186diff -urNp linux-3.0.4/kernel/power/process.c linux-3.0.4/kernel/power/process.c
61187--- linux-3.0.4/kernel/power/process.c 2011-07-21 22:17:23.000000000 -0400
61188+++ linux-3.0.4/kernel/power/process.c 2011-08-23 21:47:56.000000000 -0400
61189@@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_
61190 u64 elapsed_csecs64;
61191 unsigned int elapsed_csecs;
61192 bool wakeup = false;
61193+ bool timedout = false;
61194
61195 do_gettimeofday(&start);
61196
61197@@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_
61198
61199 while (true) {
61200 todo = 0;
61201+ if (time_after(jiffies, end_time))
61202+ timedout = true;
61203 read_lock(&tasklist_lock);
61204 do_each_thread(g, p) {
61205 if (frozen(p) || !freezable(p))
61206@@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_
61207 * try_to_stop() after schedule() in ptrace/signal
61208 * stop sees TIF_FREEZE.
61209 */
61210- if (!task_is_stopped_or_traced(p) &&
61211- !freezer_should_skip(p))
61212+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
61213 todo++;
61214+ if (timedout) {
61215+ printk(KERN_ERR "Task refusing to freeze:\n");
61216+ sched_show_task(p);
61217+ }
61218+ }
61219 } while_each_thread(g, p);
61220 read_unlock(&tasklist_lock);
61221
61222@@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_
61223 todo += wq_busy;
61224 }
61225
61226- if (!todo || time_after(jiffies, end_time))
61227+ if (!todo || timedout)
61228 break;
61229
61230 if (pm_wakeup_pending()) {
61231diff -urNp linux-3.0.4/kernel/printk.c linux-3.0.4/kernel/printk.c
61232--- linux-3.0.4/kernel/printk.c 2011-07-21 22:17:23.000000000 -0400
61233+++ linux-3.0.4/kernel/printk.c 2011-08-23 21:48:14.000000000 -0400
61234@@ -313,12 +313,17 @@ static int check_syslog_permissions(int
61235 if (from_file && type != SYSLOG_ACTION_OPEN)
61236 return 0;
61237
61238+#ifdef CONFIG_GRKERNSEC_DMESG
61239+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
61240+ return -EPERM;
61241+#endif
61242+
61243 if (syslog_action_restricted(type)) {
61244 if (capable(CAP_SYSLOG))
61245 return 0;
61246 /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
61247 if (capable(CAP_SYS_ADMIN)) {
61248- WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
61249+ printk_once(KERN_WARNING "Attempt to access syslog with CAP_SYS_ADMIN "
61250 "but no CAP_SYSLOG (deprecated).\n");
61251 return 0;
61252 }
61253diff -urNp linux-3.0.4/kernel/profile.c linux-3.0.4/kernel/profile.c
61254--- linux-3.0.4/kernel/profile.c 2011-07-21 22:17:23.000000000 -0400
61255+++ linux-3.0.4/kernel/profile.c 2011-08-23 21:47:56.000000000 -0400
61256@@ -39,7 +39,7 @@ struct profile_hit {
61257 /* Oprofile timer tick hook */
61258 static int (*timer_hook)(struct pt_regs *) __read_mostly;
61259
61260-static atomic_t *prof_buffer;
61261+static atomic_unchecked_t *prof_buffer;
61262 static unsigned long prof_len, prof_shift;
61263
61264 int prof_on __read_mostly;
61265@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
61266 hits[i].pc = 0;
61267 continue;
61268 }
61269- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
61270+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
61271 hits[i].hits = hits[i].pc = 0;
61272 }
61273 }
61274@@ -342,9 +342,9 @@ static void do_profile_hits(int type, vo
61275 * Add the current hit(s) and flush the write-queue out
61276 * to the global buffer:
61277 */
61278- atomic_add(nr_hits, &prof_buffer[pc]);
61279+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
61280 for (i = 0; i < NR_PROFILE_HIT; ++i) {
61281- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
61282+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
61283 hits[i].pc = hits[i].hits = 0;
61284 }
61285 out:
61286@@ -419,7 +419,7 @@ static void do_profile_hits(int type, vo
61287 {
61288 unsigned long pc;
61289 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
61290- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
61291+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
61292 }
61293 #endif /* !CONFIG_SMP */
61294
61295@@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
61296 return -EFAULT;
61297 buf++; p++; count--; read++;
61298 }
61299- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
61300+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
61301 if (copy_to_user(buf, (void *)pnt, count))
61302 return -EFAULT;
61303 read += count;
61304@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
61305 }
61306 #endif
61307 profile_discard_flip_buffers();
61308- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
61309+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
61310 return count;
61311 }
61312
61313diff -urNp linux-3.0.4/kernel/ptrace.c linux-3.0.4/kernel/ptrace.c
61314--- linux-3.0.4/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
61315+++ linux-3.0.4/kernel/ptrace.c 2011-08-23 21:48:14.000000000 -0400
61316@@ -132,7 +132,8 @@ int ptrace_check_attach(struct task_stru
61317 return ret;
61318 }
61319
61320-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
61321+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
61322+ unsigned int log)
61323 {
61324 const struct cred *cred = current_cred(), *tcred;
61325
61326@@ -158,7 +159,8 @@ int __ptrace_may_access(struct task_stru
61327 cred->gid == tcred->sgid &&
61328 cred->gid == tcred->gid))
61329 goto ok;
61330- if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
61331+ if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
61332+ (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
61333 goto ok;
61334 rcu_read_unlock();
61335 return -EPERM;
61336@@ -167,7 +169,9 @@ ok:
61337 smp_rmb();
61338 if (task->mm)
61339 dumpable = get_dumpable(task->mm);
61340- if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
61341+ if (!dumpable &&
61342+ ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
61343+ (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
61344 return -EPERM;
61345
61346 return security_ptrace_access_check(task, mode);
61347@@ -177,7 +181,16 @@ bool ptrace_may_access(struct task_struc
61348 {
61349 int err;
61350 task_lock(task);
61351- err = __ptrace_may_access(task, mode);
61352+ err = __ptrace_may_access(task, mode, 0);
61353+ task_unlock(task);
61354+ return !err;
61355+}
61356+
61357+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
61358+{
61359+ int err;
61360+ task_lock(task);
61361+ err = __ptrace_may_access(task, mode, 1);
61362 task_unlock(task);
61363 return !err;
61364 }
61365@@ -205,7 +218,7 @@ static int ptrace_attach(struct task_str
61366 goto out;
61367
61368 task_lock(task);
61369- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
61370+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
61371 task_unlock(task);
61372 if (retval)
61373 goto unlock_creds;
61374@@ -218,7 +231,7 @@ static int ptrace_attach(struct task_str
61375 goto unlock_tasklist;
61376
61377 task->ptrace = PT_PTRACED;
61378- if (task_ns_capable(task, CAP_SYS_PTRACE))
61379+ if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
61380 task->ptrace |= PT_PTRACE_CAP;
61381
61382 __ptrace_link(task, current);
61383@@ -406,6 +419,8 @@ int ptrace_readdata(struct task_struct *
61384 {
61385 int copied = 0;
61386
61387+ pax_track_stack();
61388+
61389 while (len > 0) {
61390 char buf[128];
61391 int this_len, retval;
61392@@ -417,7 +432,7 @@ int ptrace_readdata(struct task_struct *
61393 break;
61394 return -EIO;
61395 }
61396- if (copy_to_user(dst, buf, retval))
61397+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
61398 return -EFAULT;
61399 copied += retval;
61400 src += retval;
61401@@ -431,6 +446,8 @@ int ptrace_writedata(struct task_struct
61402 {
61403 int copied = 0;
61404
61405+ pax_track_stack();
61406+
61407 while (len > 0) {
61408 char buf[128];
61409 int this_len, retval;
61410@@ -613,9 +630,11 @@ int ptrace_request(struct task_struct *c
61411 {
61412 int ret = -EIO;
61413 siginfo_t siginfo;
61414- void __user *datavp = (void __user *) data;
61415+ void __user *datavp = (__force void __user *) data;
61416 unsigned long __user *datalp = datavp;
61417
61418+ pax_track_stack();
61419+
61420 switch (request) {
61421 case PTRACE_PEEKTEXT:
61422 case PTRACE_PEEKDATA:
61423@@ -761,14 +780,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
61424 goto out;
61425 }
61426
61427+ if (gr_handle_ptrace(child, request)) {
61428+ ret = -EPERM;
61429+ goto out_put_task_struct;
61430+ }
61431+
61432 if (request == PTRACE_ATTACH) {
61433 ret = ptrace_attach(child);
61434 /*
61435 * Some architectures need to do book-keeping after
61436 * a ptrace attach.
61437 */
61438- if (!ret)
61439+ if (!ret) {
61440 arch_ptrace_attach(child);
61441+ gr_audit_ptrace(child);
61442+ }
61443 goto out_put_task_struct;
61444 }
61445
61446@@ -793,7 +819,7 @@ int generic_ptrace_peekdata(struct task_
61447 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
61448 if (copied != sizeof(tmp))
61449 return -EIO;
61450- return put_user(tmp, (unsigned long __user *)data);
61451+ return put_user(tmp, (__force unsigned long __user *)data);
61452 }
61453
61454 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
61455@@ -816,6 +842,8 @@ int compat_ptrace_request(struct task_st
61456 siginfo_t siginfo;
61457 int ret;
61458
61459+ pax_track_stack();
61460+
61461 switch (request) {
61462 case PTRACE_PEEKTEXT:
61463 case PTRACE_PEEKDATA:
61464@@ -903,14 +931,21 @@ asmlinkage long compat_sys_ptrace(compat
61465 goto out;
61466 }
61467
61468+ if (gr_handle_ptrace(child, request)) {
61469+ ret = -EPERM;
61470+ goto out_put_task_struct;
61471+ }
61472+
61473 if (request == PTRACE_ATTACH) {
61474 ret = ptrace_attach(child);
61475 /*
61476 * Some architectures need to do book-keeping after
61477 * a ptrace attach.
61478 */
61479- if (!ret)
61480+ if (!ret) {
61481 arch_ptrace_attach(child);
61482+ gr_audit_ptrace(child);
61483+ }
61484 goto out_put_task_struct;
61485 }
61486
61487diff -urNp linux-3.0.4/kernel/rcutorture.c linux-3.0.4/kernel/rcutorture.c
61488--- linux-3.0.4/kernel/rcutorture.c 2011-07-21 22:17:23.000000000 -0400
61489+++ linux-3.0.4/kernel/rcutorture.c 2011-08-23 21:47:56.000000000 -0400
61490@@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
61491 { 0 };
61492 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
61493 { 0 };
61494-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
61495-static atomic_t n_rcu_torture_alloc;
61496-static atomic_t n_rcu_torture_alloc_fail;
61497-static atomic_t n_rcu_torture_free;
61498-static atomic_t n_rcu_torture_mberror;
61499-static atomic_t n_rcu_torture_error;
61500+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
61501+static atomic_unchecked_t n_rcu_torture_alloc;
61502+static atomic_unchecked_t n_rcu_torture_alloc_fail;
61503+static atomic_unchecked_t n_rcu_torture_free;
61504+static atomic_unchecked_t n_rcu_torture_mberror;
61505+static atomic_unchecked_t n_rcu_torture_error;
61506 static long n_rcu_torture_boost_ktrerror;
61507 static long n_rcu_torture_boost_rterror;
61508 static long n_rcu_torture_boost_failure;
61509@@ -223,11 +223,11 @@ rcu_torture_alloc(void)
61510
61511 spin_lock_bh(&rcu_torture_lock);
61512 if (list_empty(&rcu_torture_freelist)) {
61513- atomic_inc(&n_rcu_torture_alloc_fail);
61514+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
61515 spin_unlock_bh(&rcu_torture_lock);
61516 return NULL;
61517 }
61518- atomic_inc(&n_rcu_torture_alloc);
61519+ atomic_inc_unchecked(&n_rcu_torture_alloc);
61520 p = rcu_torture_freelist.next;
61521 list_del_init(p);
61522 spin_unlock_bh(&rcu_torture_lock);
61523@@ -240,7 +240,7 @@ rcu_torture_alloc(void)
61524 static void
61525 rcu_torture_free(struct rcu_torture *p)
61526 {
61527- atomic_inc(&n_rcu_torture_free);
61528+ atomic_inc_unchecked(&n_rcu_torture_free);
61529 spin_lock_bh(&rcu_torture_lock);
61530 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
61531 spin_unlock_bh(&rcu_torture_lock);
61532@@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
61533 i = rp->rtort_pipe_count;
61534 if (i > RCU_TORTURE_PIPE_LEN)
61535 i = RCU_TORTURE_PIPE_LEN;
61536- atomic_inc(&rcu_torture_wcount[i]);
61537+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
61538 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
61539 rp->rtort_mbtest = 0;
61540 rcu_torture_free(rp);
61541@@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_fr
61542 i = rp->rtort_pipe_count;
61543 if (i > RCU_TORTURE_PIPE_LEN)
61544 i = RCU_TORTURE_PIPE_LEN;
61545- atomic_inc(&rcu_torture_wcount[i]);
61546+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
61547 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
61548 rp->rtort_mbtest = 0;
61549 list_del(&rp->rtort_free);
61550@@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
61551 i = old_rp->rtort_pipe_count;
61552 if (i > RCU_TORTURE_PIPE_LEN)
61553 i = RCU_TORTURE_PIPE_LEN;
61554- atomic_inc(&rcu_torture_wcount[i]);
61555+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
61556 old_rp->rtort_pipe_count++;
61557 cur_ops->deferred_free(old_rp);
61558 }
61559@@ -951,7 +951,7 @@ static void rcu_torture_timer(unsigned l
61560 return;
61561 }
61562 if (p->rtort_mbtest == 0)
61563- atomic_inc(&n_rcu_torture_mberror);
61564+ atomic_inc_unchecked(&n_rcu_torture_mberror);
61565 spin_lock(&rand_lock);
61566 cur_ops->read_delay(&rand);
61567 n_rcu_torture_timers++;
61568@@ -1013,7 +1013,7 @@ rcu_torture_reader(void *arg)
61569 continue;
61570 }
61571 if (p->rtort_mbtest == 0)
61572- atomic_inc(&n_rcu_torture_mberror);
61573+ atomic_inc_unchecked(&n_rcu_torture_mberror);
61574 cur_ops->read_delay(&rand);
61575 preempt_disable();
61576 pipe_count = p->rtort_pipe_count;
61577@@ -1072,16 +1072,16 @@ rcu_torture_printk(char *page)
61578 rcu_torture_current,
61579 rcu_torture_current_version,
61580 list_empty(&rcu_torture_freelist),
61581- atomic_read(&n_rcu_torture_alloc),
61582- atomic_read(&n_rcu_torture_alloc_fail),
61583- atomic_read(&n_rcu_torture_free),
61584- atomic_read(&n_rcu_torture_mberror),
61585+ atomic_read_unchecked(&n_rcu_torture_alloc),
61586+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
61587+ atomic_read_unchecked(&n_rcu_torture_free),
61588+ atomic_read_unchecked(&n_rcu_torture_mberror),
61589 n_rcu_torture_boost_ktrerror,
61590 n_rcu_torture_boost_rterror,
61591 n_rcu_torture_boost_failure,
61592 n_rcu_torture_boosts,
61593 n_rcu_torture_timers);
61594- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
61595+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
61596 n_rcu_torture_boost_ktrerror != 0 ||
61597 n_rcu_torture_boost_rterror != 0 ||
61598 n_rcu_torture_boost_failure != 0)
61599@@ -1089,7 +1089,7 @@ rcu_torture_printk(char *page)
61600 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
61601 if (i > 1) {
61602 cnt += sprintf(&page[cnt], "!!! ");
61603- atomic_inc(&n_rcu_torture_error);
61604+ atomic_inc_unchecked(&n_rcu_torture_error);
61605 WARN_ON_ONCE(1);
61606 }
61607 cnt += sprintf(&page[cnt], "Reader Pipe: ");
61608@@ -1103,7 +1103,7 @@ rcu_torture_printk(char *page)
61609 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
61610 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
61611 cnt += sprintf(&page[cnt], " %d",
61612- atomic_read(&rcu_torture_wcount[i]));
61613+ atomic_read_unchecked(&rcu_torture_wcount[i]));
61614 }
61615 cnt += sprintf(&page[cnt], "\n");
61616 if (cur_ops->stats)
61617@@ -1412,7 +1412,7 @@ rcu_torture_cleanup(void)
61618
61619 if (cur_ops->cleanup)
61620 cur_ops->cleanup();
61621- if (atomic_read(&n_rcu_torture_error))
61622+ if (atomic_read_unchecked(&n_rcu_torture_error))
61623 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
61624 else
61625 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
61626@@ -1476,17 +1476,17 @@ rcu_torture_init(void)
61627
61628 rcu_torture_current = NULL;
61629 rcu_torture_current_version = 0;
61630- atomic_set(&n_rcu_torture_alloc, 0);
61631- atomic_set(&n_rcu_torture_alloc_fail, 0);
61632- atomic_set(&n_rcu_torture_free, 0);
61633- atomic_set(&n_rcu_torture_mberror, 0);
61634- atomic_set(&n_rcu_torture_error, 0);
61635+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
61636+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
61637+ atomic_set_unchecked(&n_rcu_torture_free, 0);
61638+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
61639+ atomic_set_unchecked(&n_rcu_torture_error, 0);
61640 n_rcu_torture_boost_ktrerror = 0;
61641 n_rcu_torture_boost_rterror = 0;
61642 n_rcu_torture_boost_failure = 0;
61643 n_rcu_torture_boosts = 0;
61644 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
61645- atomic_set(&rcu_torture_wcount[i], 0);
61646+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
61647 for_each_possible_cpu(cpu) {
61648 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
61649 per_cpu(rcu_torture_count, cpu)[i] = 0;
61650diff -urNp linux-3.0.4/kernel/rcutree.c linux-3.0.4/kernel/rcutree.c
61651--- linux-3.0.4/kernel/rcutree.c 2011-07-21 22:17:23.000000000 -0400
61652+++ linux-3.0.4/kernel/rcutree.c 2011-08-23 21:47:56.000000000 -0400
61653@@ -1470,7 +1470,7 @@ __rcu_process_callbacks(struct rcu_state
61654 /*
61655 * Do softirq processing for the current CPU.
61656 */
61657-static void rcu_process_callbacks(struct softirq_action *unused)
61658+static void rcu_process_callbacks(void)
61659 {
61660 __rcu_process_callbacks(&rcu_sched_state,
61661 &__get_cpu_var(rcu_sched_data));
61662diff -urNp linux-3.0.4/kernel/rcutree_plugin.h linux-3.0.4/kernel/rcutree_plugin.h
61663--- linux-3.0.4/kernel/rcutree_plugin.h 2011-07-21 22:17:23.000000000 -0400
61664+++ linux-3.0.4/kernel/rcutree_plugin.h 2011-08-23 21:47:56.000000000 -0400
61665@@ -822,7 +822,7 @@ void synchronize_rcu_expedited(void)
61666
61667 /* Clean up and exit. */
61668 smp_mb(); /* ensure expedited GP seen before counter increment. */
61669- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
61670+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
61671 unlock_mb_ret:
61672 mutex_unlock(&sync_rcu_preempt_exp_mutex);
61673 mb_ret:
61674@@ -1774,8 +1774,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe
61675
61676 #else /* #ifndef CONFIG_SMP */
61677
61678-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
61679-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
61680+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
61681+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
61682
61683 static int synchronize_sched_expedited_cpu_stop(void *data)
61684 {
61685@@ -1830,7 +1830,7 @@ void synchronize_sched_expedited(void)
61686 int firstsnap, s, snap, trycount = 0;
61687
61688 /* Note that atomic_inc_return() implies full memory barrier. */
61689- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
61690+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
61691 get_online_cpus();
61692
61693 /*
61694@@ -1851,7 +1851,7 @@ void synchronize_sched_expedited(void)
61695 }
61696
61697 /* Check to see if someone else did our work for us. */
61698- s = atomic_read(&sync_sched_expedited_done);
61699+ s = atomic_read_unchecked(&sync_sched_expedited_done);
61700 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
61701 smp_mb(); /* ensure test happens before caller kfree */
61702 return;
61703@@ -1866,7 +1866,7 @@ void synchronize_sched_expedited(void)
61704 * grace period works for us.
61705 */
61706 get_online_cpus();
61707- snap = atomic_read(&sync_sched_expedited_started) - 1;
61708+ snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
61709 smp_mb(); /* ensure read is before try_stop_cpus(). */
61710 }
61711
61712@@ -1877,12 +1877,12 @@ void synchronize_sched_expedited(void)
61713 * than we did beat us to the punch.
61714 */
61715 do {
61716- s = atomic_read(&sync_sched_expedited_done);
61717+ s = atomic_read_unchecked(&sync_sched_expedited_done);
61718 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
61719 smp_mb(); /* ensure test happens before caller kfree */
61720 break;
61721 }
61722- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
61723+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
61724
61725 put_online_cpus();
61726 }
61727diff -urNp linux-3.0.4/kernel/relay.c linux-3.0.4/kernel/relay.c
61728--- linux-3.0.4/kernel/relay.c 2011-07-21 22:17:23.000000000 -0400
61729+++ linux-3.0.4/kernel/relay.c 2011-08-23 21:48:14.000000000 -0400
61730@@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struc
61731 };
61732 ssize_t ret;
61733
61734+ pax_track_stack();
61735+
61736 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
61737 return 0;
61738 if (splice_grow_spd(pipe, &spd))
61739diff -urNp linux-3.0.4/kernel/resource.c linux-3.0.4/kernel/resource.c
61740--- linux-3.0.4/kernel/resource.c 2011-07-21 22:17:23.000000000 -0400
61741+++ linux-3.0.4/kernel/resource.c 2011-08-23 21:48:14.000000000 -0400
61742@@ -141,8 +141,18 @@ static const struct file_operations proc
61743
61744 static int __init ioresources_init(void)
61745 {
61746+#ifdef CONFIG_GRKERNSEC_PROC_ADD
61747+#ifdef CONFIG_GRKERNSEC_PROC_USER
61748+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
61749+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
61750+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61751+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
61752+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
61753+#endif
61754+#else
61755 proc_create("ioports", 0, NULL, &proc_ioports_operations);
61756 proc_create("iomem", 0, NULL, &proc_iomem_operations);
61757+#endif
61758 return 0;
61759 }
61760 __initcall(ioresources_init);
61761diff -urNp linux-3.0.4/kernel/rtmutex-tester.c linux-3.0.4/kernel/rtmutex-tester.c
61762--- linux-3.0.4/kernel/rtmutex-tester.c 2011-07-21 22:17:23.000000000 -0400
61763+++ linux-3.0.4/kernel/rtmutex-tester.c 2011-08-23 21:47:56.000000000 -0400
61764@@ -20,7 +20,7 @@
61765 #define MAX_RT_TEST_MUTEXES 8
61766
61767 static spinlock_t rttest_lock;
61768-static atomic_t rttest_event;
61769+static atomic_unchecked_t rttest_event;
61770
61771 struct test_thread_data {
61772 int opcode;
61773@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_
61774
61775 case RTTEST_LOCKCONT:
61776 td->mutexes[td->opdata] = 1;
61777- td->event = atomic_add_return(1, &rttest_event);
61778+ td->event = atomic_add_return_unchecked(1, &rttest_event);
61779 return 0;
61780
61781 case RTTEST_RESET:
61782@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_
61783 return 0;
61784
61785 case RTTEST_RESETEVENT:
61786- atomic_set(&rttest_event, 0);
61787+ atomic_set_unchecked(&rttest_event, 0);
61788 return 0;
61789
61790 default:
61791@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_
61792 return ret;
61793
61794 td->mutexes[id] = 1;
61795- td->event = atomic_add_return(1, &rttest_event);
61796+ td->event = atomic_add_return_unchecked(1, &rttest_event);
61797 rt_mutex_lock(&mutexes[id]);
61798- td->event = atomic_add_return(1, &rttest_event);
61799+ td->event = atomic_add_return_unchecked(1, &rttest_event);
61800 td->mutexes[id] = 4;
61801 return 0;
61802
61803@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_
61804 return ret;
61805
61806 td->mutexes[id] = 1;
61807- td->event = atomic_add_return(1, &rttest_event);
61808+ td->event = atomic_add_return_unchecked(1, &rttest_event);
61809 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
61810- td->event = atomic_add_return(1, &rttest_event);
61811+ td->event = atomic_add_return_unchecked(1, &rttest_event);
61812 td->mutexes[id] = ret ? 0 : 4;
61813 return ret ? -EINTR : 0;
61814
61815@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_
61816 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
61817 return ret;
61818
61819- td->event = atomic_add_return(1, &rttest_event);
61820+ td->event = atomic_add_return_unchecked(1, &rttest_event);
61821 rt_mutex_unlock(&mutexes[id]);
61822- td->event = atomic_add_return(1, &rttest_event);
61823+ td->event = atomic_add_return_unchecked(1, &rttest_event);
61824 td->mutexes[id] = 0;
61825 return 0;
61826
61827@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mu
61828 break;
61829
61830 td->mutexes[dat] = 2;
61831- td->event = atomic_add_return(1, &rttest_event);
61832+ td->event = atomic_add_return_unchecked(1, &rttest_event);
61833 break;
61834
61835 default:
61836@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mu
61837 return;
61838
61839 td->mutexes[dat] = 3;
61840- td->event = atomic_add_return(1, &rttest_event);
61841+ td->event = atomic_add_return_unchecked(1, &rttest_event);
61842 break;
61843
61844 case RTTEST_LOCKNOWAIT:
61845@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mu
61846 return;
61847
61848 td->mutexes[dat] = 1;
61849- td->event = atomic_add_return(1, &rttest_event);
61850+ td->event = atomic_add_return_unchecked(1, &rttest_event);
61851 return;
61852
61853 default:
61854diff -urNp linux-3.0.4/kernel/sched_autogroup.c linux-3.0.4/kernel/sched_autogroup.c
61855--- linux-3.0.4/kernel/sched_autogroup.c 2011-07-21 22:17:23.000000000 -0400
61856+++ linux-3.0.4/kernel/sched_autogroup.c 2011-08-23 21:47:56.000000000 -0400
61857@@ -7,7 +7,7 @@
61858
61859 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
61860 static struct autogroup autogroup_default;
61861-static atomic_t autogroup_seq_nr;
61862+static atomic_unchecked_t autogroup_seq_nr;
61863
61864 static void __init autogroup_init(struct task_struct *init_task)
61865 {
61866@@ -78,7 +78,7 @@ static inline struct autogroup *autogrou
61867
61868 kref_init(&ag->kref);
61869 init_rwsem(&ag->lock);
61870- ag->id = atomic_inc_return(&autogroup_seq_nr);
61871+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
61872 ag->tg = tg;
61873 #ifdef CONFIG_RT_GROUP_SCHED
61874 /*
61875diff -urNp linux-3.0.4/kernel/sched.c linux-3.0.4/kernel/sched.c
61876--- linux-3.0.4/kernel/sched.c 2011-07-21 22:17:23.000000000 -0400
61877+++ linux-3.0.4/kernel/sched.c 2011-08-23 21:48:14.000000000 -0400
61878@@ -4251,6 +4251,8 @@ asmlinkage void __sched schedule(void)
61879 struct rq *rq;
61880 int cpu;
61881
61882+ pax_track_stack();
61883+
61884 need_resched:
61885 preempt_disable();
61886 cpu = smp_processor_id();
61887@@ -4934,6 +4936,8 @@ int can_nice(const struct task_struct *p
61888 /* convert nice value [19,-20] to rlimit style value [1,40] */
61889 int nice_rlim = 20 - nice;
61890
61891+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
61892+
61893 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
61894 capable(CAP_SYS_NICE));
61895 }
61896@@ -4967,7 +4971,8 @@ SYSCALL_DEFINE1(nice, int, increment)
61897 if (nice > 19)
61898 nice = 19;
61899
61900- if (increment < 0 && !can_nice(current, nice))
61901+ if (increment < 0 && (!can_nice(current, nice) ||
61902+ gr_handle_chroot_nice()))
61903 return -EPERM;
61904
61905 retval = security_task_setnice(current, nice);
61906@@ -5111,6 +5116,7 @@ recheck:
61907 unsigned long rlim_rtprio =
61908 task_rlimit(p, RLIMIT_RTPRIO);
61909
61910+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
61911 /* can't set/change the rt policy */
61912 if (policy != p->policy && !rlim_rtprio)
61913 return -EPERM;
61914diff -urNp linux-3.0.4/kernel/sched_fair.c linux-3.0.4/kernel/sched_fair.c
61915--- linux-3.0.4/kernel/sched_fair.c 2011-07-21 22:17:23.000000000 -0400
61916+++ linux-3.0.4/kernel/sched_fair.c 2011-08-23 21:47:56.000000000 -0400
61917@@ -4050,7 +4050,7 @@ static void nohz_idle_balance(int this_c
61918 * run_rebalance_domains is triggered when needed from the scheduler tick.
61919 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
61920 */
61921-static void run_rebalance_domains(struct softirq_action *h)
61922+static void run_rebalance_domains(void)
61923 {
61924 int this_cpu = smp_processor_id();
61925 struct rq *this_rq = cpu_rq(this_cpu);
61926diff -urNp linux-3.0.4/kernel/signal.c linux-3.0.4/kernel/signal.c
61927--- linux-3.0.4/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
61928+++ linux-3.0.4/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
61929@@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache
61930
61931 int print_fatal_signals __read_mostly;
61932
61933-static void __user *sig_handler(struct task_struct *t, int sig)
61934+static __sighandler_t sig_handler(struct task_struct *t, int sig)
61935 {
61936 return t->sighand->action[sig - 1].sa.sa_handler;
61937 }
61938
61939-static int sig_handler_ignored(void __user *handler, int sig)
61940+static int sig_handler_ignored(__sighandler_t handler, int sig)
61941 {
61942 /* Is it explicitly or implicitly ignored? */
61943 return handler == SIG_IGN ||
61944@@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us
61945 static int sig_task_ignored(struct task_struct *t, int sig,
61946 int from_ancestor_ns)
61947 {
61948- void __user *handler;
61949+ __sighandler_t handler;
61950
61951 handler = sig_handler(t, sig);
61952
61953@@ -320,6 +320,9 @@ __sigqueue_alloc(int sig, struct task_st
61954 atomic_inc(&user->sigpending);
61955 rcu_read_unlock();
61956
61957+ if (!override_rlimit)
61958+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
61959+
61960 if (override_rlimit ||
61961 atomic_read(&user->sigpending) <=
61962 task_rlimit(t, RLIMIT_SIGPENDING)) {
61963@@ -444,7 +447,7 @@ flush_signal_handlers(struct task_struct
61964
61965 int unhandled_signal(struct task_struct *tsk, int sig)
61966 {
61967- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
61968+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
61969 if (is_global_init(tsk))
61970 return 1;
61971 if (handler != SIG_IGN && handler != SIG_DFL)
61972@@ -770,6 +773,13 @@ static int check_kill_permission(int sig
61973 }
61974 }
61975
61976+ /* allow glibc communication via tgkill to other threads in our
61977+ thread group */
61978+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
61979+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
61980+ && gr_handle_signal(t, sig))
61981+ return -EPERM;
61982+
61983 return security_task_kill(t, info, sig, 0);
61984 }
61985
61986@@ -1092,7 +1102,7 @@ __group_send_sig_info(int sig, struct si
61987 return send_signal(sig, info, p, 1);
61988 }
61989
61990-static int
61991+int
61992 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
61993 {
61994 return send_signal(sig, info, t, 0);
61995@@ -1129,6 +1139,7 @@ force_sig_info(int sig, struct siginfo *
61996 unsigned long int flags;
61997 int ret, blocked, ignored;
61998 struct k_sigaction *action;
61999+ int is_unhandled = 0;
62000
62001 spin_lock_irqsave(&t->sighand->siglock, flags);
62002 action = &t->sighand->action[sig-1];
62003@@ -1143,9 +1154,18 @@ force_sig_info(int sig, struct siginfo *
62004 }
62005 if (action->sa.sa_handler == SIG_DFL)
62006 t->signal->flags &= ~SIGNAL_UNKILLABLE;
62007+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
62008+ is_unhandled = 1;
62009 ret = specific_send_sig_info(sig, info, t);
62010 spin_unlock_irqrestore(&t->sighand->siglock, flags);
62011
62012+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
62013+ normal operation */
62014+ if (is_unhandled) {
62015+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
62016+ gr_handle_crash(t, sig);
62017+ }
62018+
62019 return ret;
62020 }
62021
62022@@ -1212,8 +1232,11 @@ int group_send_sig_info(int sig, struct
62023 ret = check_kill_permission(sig, info, p);
62024 rcu_read_unlock();
62025
62026- if (!ret && sig)
62027+ if (!ret && sig) {
62028 ret = do_send_sig_info(sig, info, p, true);
62029+ if (!ret)
62030+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
62031+ }
62032
62033 return ret;
62034 }
62035@@ -1839,6 +1862,8 @@ void ptrace_notify(int exit_code)
62036 {
62037 siginfo_t info;
62038
62039+ pax_track_stack();
62040+
62041 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
62042
62043 memset(&info, 0, sizeof info);
62044@@ -2639,7 +2664,15 @@ do_send_specific(pid_t tgid, pid_t pid,
62045 int error = -ESRCH;
62046
62047 rcu_read_lock();
62048- p = find_task_by_vpid(pid);
62049+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62050+ /* allow glibc communication via tgkill to other threads in our
62051+ thread group */
62052+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
62053+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
62054+ p = find_task_by_vpid_unrestricted(pid);
62055+ else
62056+#endif
62057+ p = find_task_by_vpid(pid);
62058 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
62059 error = check_kill_permission(sig, info, p);
62060 /*
62061diff -urNp linux-3.0.4/kernel/smp.c linux-3.0.4/kernel/smp.c
62062--- linux-3.0.4/kernel/smp.c 2011-07-21 22:17:23.000000000 -0400
62063+++ linux-3.0.4/kernel/smp.c 2011-08-23 21:47:56.000000000 -0400
62064@@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t fu
62065 }
62066 EXPORT_SYMBOL(smp_call_function);
62067
62068-void ipi_call_lock(void)
62069+void ipi_call_lock(void) __acquires(call_function.lock)
62070 {
62071 raw_spin_lock(&call_function.lock);
62072 }
62073
62074-void ipi_call_unlock(void)
62075+void ipi_call_unlock(void) __releases(call_function.lock)
62076 {
62077 raw_spin_unlock(&call_function.lock);
62078 }
62079
62080-void ipi_call_lock_irq(void)
62081+void ipi_call_lock_irq(void) __acquires(call_function.lock)
62082 {
62083 raw_spin_lock_irq(&call_function.lock);
62084 }
62085
62086-void ipi_call_unlock_irq(void)
62087+void ipi_call_unlock_irq(void) __releases(call_function.lock)
62088 {
62089 raw_spin_unlock_irq(&call_function.lock);
62090 }
62091diff -urNp linux-3.0.4/kernel/softirq.c linux-3.0.4/kernel/softirq.c
62092--- linux-3.0.4/kernel/softirq.c 2011-07-21 22:17:23.000000000 -0400
62093+++ linux-3.0.4/kernel/softirq.c 2011-08-23 21:47:56.000000000 -0400
62094@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
62095
62096 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
62097
62098-char *softirq_to_name[NR_SOFTIRQS] = {
62099+const char * const softirq_to_name[NR_SOFTIRQS] = {
62100 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
62101 "TASKLET", "SCHED", "HRTIMER", "RCU"
62102 };
62103@@ -235,7 +235,7 @@ restart:
62104 kstat_incr_softirqs_this_cpu(vec_nr);
62105
62106 trace_softirq_entry(vec_nr);
62107- h->action(h);
62108+ h->action();
62109 trace_softirq_exit(vec_nr);
62110 if (unlikely(prev_count != preempt_count())) {
62111 printk(KERN_ERR "huh, entered softirq %u %s %p"
62112@@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
62113 local_irq_restore(flags);
62114 }
62115
62116-void open_softirq(int nr, void (*action)(struct softirq_action *))
62117+void open_softirq(int nr, void (*action)(void))
62118 {
62119- softirq_vec[nr].action = action;
62120+ pax_open_kernel();
62121+ *(void **)&softirq_vec[nr].action = action;
62122+ pax_close_kernel();
62123 }
62124
62125 /*
62126@@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct
62127
62128 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
62129
62130-static void tasklet_action(struct softirq_action *a)
62131+static void tasklet_action(void)
62132 {
62133 struct tasklet_struct *list;
62134
62135@@ -476,7 +478,7 @@ static void tasklet_action(struct softir
62136 }
62137 }
62138
62139-static void tasklet_hi_action(struct softirq_action *a)
62140+static void tasklet_hi_action(void)
62141 {
62142 struct tasklet_struct *list;
62143
62144diff -urNp linux-3.0.4/kernel/sys.c linux-3.0.4/kernel/sys.c
62145--- linux-3.0.4/kernel/sys.c 2011-08-29 23:26:14.000000000 -0400
62146+++ linux-3.0.4/kernel/sys.c 2011-08-29 23:26:27.000000000 -0400
62147@@ -158,6 +158,12 @@ static int set_one_prio(struct task_stru
62148 error = -EACCES;
62149 goto out;
62150 }
62151+
62152+ if (gr_handle_chroot_setpriority(p, niceval)) {
62153+ error = -EACCES;
62154+ goto out;
62155+ }
62156+
62157 no_nice = security_task_setnice(p, niceval);
62158 if (no_nice) {
62159 error = no_nice;
62160@@ -541,6 +547,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
62161 goto error;
62162 }
62163
62164+ if (gr_check_group_change(new->gid, new->egid, -1))
62165+ goto error;
62166+
62167 if (rgid != (gid_t) -1 ||
62168 (egid != (gid_t) -1 && egid != old->gid))
62169 new->sgid = new->egid;
62170@@ -570,6 +579,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
62171 old = current_cred();
62172
62173 retval = -EPERM;
62174+
62175+ if (gr_check_group_change(gid, gid, gid))
62176+ goto error;
62177+
62178 if (nsown_capable(CAP_SETGID))
62179 new->gid = new->egid = new->sgid = new->fsgid = gid;
62180 else if (gid == old->gid || gid == old->sgid)
62181@@ -595,11 +608,18 @@ static int set_user(struct cred *new)
62182 if (!new_user)
62183 return -EAGAIN;
62184
62185+ /*
62186+ * We don't fail in case of NPROC limit excess here because too many
62187+ * poorly written programs don't check set*uid() return code, assuming
62188+ * it never fails if called by root. We may still enforce NPROC limit
62189+ * for programs doing set*uid()+execve() by harmlessly deferring the
62190+ * failure to the execve() stage.
62191+ */
62192 if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
62193- new_user != INIT_USER) {
62194- free_uid(new_user);
62195- return -EAGAIN;
62196- }
62197+ new_user != INIT_USER)
62198+ current->flags |= PF_NPROC_EXCEEDED;
62199+ else
62200+ current->flags &= ~PF_NPROC_EXCEEDED;
62201
62202 free_uid(new->user);
62203 new->user = new_user;
62204@@ -650,6 +670,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
62205 goto error;
62206 }
62207
62208+ if (gr_check_user_change(new->uid, new->euid, -1))
62209+ goto error;
62210+
62211 if (new->uid != old->uid) {
62212 retval = set_user(new);
62213 if (retval < 0)
62214@@ -694,6 +717,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
62215 old = current_cred();
62216
62217 retval = -EPERM;
62218+
62219+ if (gr_check_crash_uid(uid))
62220+ goto error;
62221+ if (gr_check_user_change(uid, uid, uid))
62222+ goto error;
62223+
62224 if (nsown_capable(CAP_SETUID)) {
62225 new->suid = new->uid = uid;
62226 if (uid != old->uid) {
62227@@ -748,6 +777,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
62228 goto error;
62229 }
62230
62231+ if (gr_check_user_change(ruid, euid, -1))
62232+ goto error;
62233+
62234 if (ruid != (uid_t) -1) {
62235 new->uid = ruid;
62236 if (ruid != old->uid) {
62237@@ -812,6 +844,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
62238 goto error;
62239 }
62240
62241+ if (gr_check_group_change(rgid, egid, -1))
62242+ goto error;
62243+
62244 if (rgid != (gid_t) -1)
62245 new->gid = rgid;
62246 if (egid != (gid_t) -1)
62247@@ -858,6 +893,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
62248 old = current_cred();
62249 old_fsuid = old->fsuid;
62250
62251+ if (gr_check_user_change(-1, -1, uid))
62252+ goto error;
62253+
62254 if (uid == old->uid || uid == old->euid ||
62255 uid == old->suid || uid == old->fsuid ||
62256 nsown_capable(CAP_SETUID)) {
62257@@ -868,6 +906,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
62258 }
62259 }
62260
62261+error:
62262 abort_creds(new);
62263 return old_fsuid;
62264
62265@@ -894,12 +933,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
62266 if (gid == old->gid || gid == old->egid ||
62267 gid == old->sgid || gid == old->fsgid ||
62268 nsown_capable(CAP_SETGID)) {
62269+ if (gr_check_group_change(-1, -1, gid))
62270+ goto error;
62271+
62272 if (gid != old_fsgid) {
62273 new->fsgid = gid;
62274 goto change_okay;
62275 }
62276 }
62277
62278+error:
62279 abort_creds(new);
62280 return old_fsgid;
62281
62282@@ -1680,7 +1723,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
62283 error = get_dumpable(me->mm);
62284 break;
62285 case PR_SET_DUMPABLE:
62286- if (arg2 < 0 || arg2 > 1) {
62287+ if (arg2 > 1) {
62288 error = -EINVAL;
62289 break;
62290 }
62291diff -urNp linux-3.0.4/kernel/sysctl.c linux-3.0.4/kernel/sysctl.c
62292--- linux-3.0.4/kernel/sysctl.c 2011-07-21 22:17:23.000000000 -0400
62293+++ linux-3.0.4/kernel/sysctl.c 2011-08-23 21:48:14.000000000 -0400
62294@@ -85,6 +85,13 @@
62295
62296
62297 #if defined(CONFIG_SYSCTL)
62298+#include <linux/grsecurity.h>
62299+#include <linux/grinternal.h>
62300+
62301+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
62302+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
62303+ const int op);
62304+extern int gr_handle_chroot_sysctl(const int op);
62305
62306 /* External variables not in a header file. */
62307 extern int sysctl_overcommit_memory;
62308@@ -197,6 +204,7 @@ static int sysrq_sysctl_handler(ctl_tabl
62309 }
62310
62311 #endif
62312+extern struct ctl_table grsecurity_table[];
62313
62314 static struct ctl_table root_table[];
62315 static struct ctl_table_root sysctl_table_root;
62316@@ -226,6 +234,20 @@ extern struct ctl_table epoll_table[];
62317 int sysctl_legacy_va_layout;
62318 #endif
62319
62320+#ifdef CONFIG_PAX_SOFTMODE
62321+static ctl_table pax_table[] = {
62322+ {
62323+ .procname = "softmode",
62324+ .data = &pax_softmode,
62325+ .maxlen = sizeof(unsigned int),
62326+ .mode = 0600,
62327+ .proc_handler = &proc_dointvec,
62328+ },
62329+
62330+ { }
62331+};
62332+#endif
62333+
62334 /* The default sysctl tables: */
62335
62336 static struct ctl_table root_table[] = {
62337@@ -272,6 +294,22 @@ static int max_extfrag_threshold = 1000;
62338 #endif
62339
62340 static struct ctl_table kern_table[] = {
62341+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
62342+ {
62343+ .procname = "grsecurity",
62344+ .mode = 0500,
62345+ .child = grsecurity_table,
62346+ },
62347+#endif
62348+
62349+#ifdef CONFIG_PAX_SOFTMODE
62350+ {
62351+ .procname = "pax",
62352+ .mode = 0500,
62353+ .child = pax_table,
62354+ },
62355+#endif
62356+
62357 {
62358 .procname = "sched_child_runs_first",
62359 .data = &sysctl_sched_child_runs_first,
62360@@ -546,7 +584,7 @@ static struct ctl_table kern_table[] = {
62361 .data = &modprobe_path,
62362 .maxlen = KMOD_PATH_LEN,
62363 .mode = 0644,
62364- .proc_handler = proc_dostring,
62365+ .proc_handler = proc_dostring_modpriv,
62366 },
62367 {
62368 .procname = "modules_disabled",
62369@@ -713,16 +751,20 @@ static struct ctl_table kern_table[] = {
62370 .extra1 = &zero,
62371 .extra2 = &one,
62372 },
62373+#endif
62374 {
62375 .procname = "kptr_restrict",
62376 .data = &kptr_restrict,
62377 .maxlen = sizeof(int),
62378 .mode = 0644,
62379 .proc_handler = proc_dmesg_restrict,
62380+#ifdef CONFIG_GRKERNSEC_HIDESYM
62381+ .extra1 = &two,
62382+#else
62383 .extra1 = &zero,
62384+#endif
62385 .extra2 = &two,
62386 },
62387-#endif
62388 {
62389 .procname = "ngroups_max",
62390 .data = &ngroups_max,
62391@@ -1205,6 +1247,13 @@ static struct ctl_table vm_table[] = {
62392 .proc_handler = proc_dointvec_minmax,
62393 .extra1 = &zero,
62394 },
62395+ {
62396+ .procname = "heap_stack_gap",
62397+ .data = &sysctl_heap_stack_gap,
62398+ .maxlen = sizeof(sysctl_heap_stack_gap),
62399+ .mode = 0644,
62400+ .proc_handler = proc_doulongvec_minmax,
62401+ },
62402 #else
62403 {
62404 .procname = "nr_trim_pages",
62405@@ -1714,6 +1763,17 @@ static int test_perm(int mode, int op)
62406 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
62407 {
62408 int mode;
62409+ int error;
62410+
62411+ if (table->parent != NULL && table->parent->procname != NULL &&
62412+ table->procname != NULL &&
62413+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
62414+ return -EACCES;
62415+ if (gr_handle_chroot_sysctl(op))
62416+ return -EACCES;
62417+ error = gr_handle_sysctl(table, op);
62418+ if (error)
62419+ return error;
62420
62421 if (root->permissions)
62422 mode = root->permissions(root, current->nsproxy, table);
62423@@ -2118,6 +2178,16 @@ int proc_dostring(struct ctl_table *tabl
62424 buffer, lenp, ppos);
62425 }
62426
62427+int proc_dostring_modpriv(struct ctl_table *table, int write,
62428+ void __user *buffer, size_t *lenp, loff_t *ppos)
62429+{
62430+ if (write && !capable(CAP_SYS_MODULE))
62431+ return -EPERM;
62432+
62433+ return _proc_do_string(table->data, table->maxlen, write,
62434+ buffer, lenp, ppos);
62435+}
62436+
62437 static size_t proc_skip_spaces(char **buf)
62438 {
62439 size_t ret;
62440@@ -2223,6 +2293,8 @@ static int proc_put_long(void __user **b
62441 len = strlen(tmp);
62442 if (len > *size)
62443 len = *size;
62444+ if (len > sizeof(tmp))
62445+ len = sizeof(tmp);
62446 if (copy_to_user(*buf, tmp, len))
62447 return -EFAULT;
62448 *size -= len;
62449@@ -2539,8 +2611,11 @@ static int __do_proc_doulongvec_minmax(v
62450 *i = val;
62451 } else {
62452 val = convdiv * (*i) / convmul;
62453- if (!first)
62454+ if (!first) {
62455 err = proc_put_char(&buffer, &left, '\t');
62456+ if (err)
62457+ break;
62458+ }
62459 err = proc_put_long(&buffer, &left, val, false);
62460 if (err)
62461 break;
62462@@ -2935,6 +3010,12 @@ int proc_dostring(struct ctl_table *tabl
62463 return -ENOSYS;
62464 }
62465
62466+int proc_dostring_modpriv(struct ctl_table *table, int write,
62467+ void __user *buffer, size_t *lenp, loff_t *ppos)
62468+{
62469+ return -ENOSYS;
62470+}
62471+
62472 int proc_dointvec(struct ctl_table *table, int write,
62473 void __user *buffer, size_t *lenp, loff_t *ppos)
62474 {
62475@@ -2991,6 +3072,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
62476 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
62477 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
62478 EXPORT_SYMBOL(proc_dostring);
62479+EXPORT_SYMBOL(proc_dostring_modpriv);
62480 EXPORT_SYMBOL(proc_doulongvec_minmax);
62481 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
62482 EXPORT_SYMBOL(register_sysctl_table);
62483diff -urNp linux-3.0.4/kernel/sysctl_check.c linux-3.0.4/kernel/sysctl_check.c
62484--- linux-3.0.4/kernel/sysctl_check.c 2011-07-21 22:17:23.000000000 -0400
62485+++ linux-3.0.4/kernel/sysctl_check.c 2011-08-23 21:48:14.000000000 -0400
62486@@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *n
62487 set_fail(&fail, table, "Directory with extra2");
62488 } else {
62489 if ((table->proc_handler == proc_dostring) ||
62490+ (table->proc_handler == proc_dostring_modpriv) ||
62491 (table->proc_handler == proc_dointvec) ||
62492 (table->proc_handler == proc_dointvec_minmax) ||
62493 (table->proc_handler == proc_dointvec_jiffies) ||
62494diff -urNp linux-3.0.4/kernel/taskstats.c linux-3.0.4/kernel/taskstats.c
62495--- linux-3.0.4/kernel/taskstats.c 2011-07-21 22:17:23.000000000 -0400
62496+++ linux-3.0.4/kernel/taskstats.c 2011-08-23 21:48:14.000000000 -0400
62497@@ -27,9 +27,12 @@
62498 #include <linux/cgroup.h>
62499 #include <linux/fs.h>
62500 #include <linux/file.h>
62501+#include <linux/grsecurity.h>
62502 #include <net/genetlink.h>
62503 #include <asm/atomic.h>
62504
62505+extern int gr_is_taskstats_denied(int pid);
62506+
62507 /*
62508 * Maximum length of a cpumask that can be specified in
62509 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
62510@@ -558,6 +561,9 @@ err:
62511
62512 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
62513 {
62514+ if (gr_is_taskstats_denied(current->pid))
62515+ return -EACCES;
62516+
62517 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
62518 return cmd_attr_register_cpumask(info);
62519 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
62520diff -urNp linux-3.0.4/kernel/time/alarmtimer.c linux-3.0.4/kernel/time/alarmtimer.c
62521--- linux-3.0.4/kernel/time/alarmtimer.c 2011-07-21 22:17:23.000000000 -0400
62522+++ linux-3.0.4/kernel/time/alarmtimer.c 2011-08-23 21:47:56.000000000 -0400
62523@@ -685,7 +685,7 @@ static int __init alarmtimer_init(void)
62524 {
62525 int error = 0;
62526 int i;
62527- struct k_clock alarm_clock = {
62528+ static struct k_clock alarm_clock = {
62529 .clock_getres = alarm_clock_getres,
62530 .clock_get = alarm_clock_get,
62531 .timer_create = alarm_timer_create,
62532diff -urNp linux-3.0.4/kernel/time/tick-broadcast.c linux-3.0.4/kernel/time/tick-broadcast.c
62533--- linux-3.0.4/kernel/time/tick-broadcast.c 2011-07-21 22:17:23.000000000 -0400
62534+++ linux-3.0.4/kernel/time/tick-broadcast.c 2011-08-23 21:47:56.000000000 -0400
62535@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct cl
62536 * then clear the broadcast bit.
62537 */
62538 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
62539- int cpu = smp_processor_id();
62540+ cpu = smp_processor_id();
62541
62542 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
62543 tick_broadcast_clear_oneshot(cpu);
62544diff -urNp linux-3.0.4/kernel/time/timekeeping.c linux-3.0.4/kernel/time/timekeeping.c
62545--- linux-3.0.4/kernel/time/timekeeping.c 2011-07-21 22:17:23.000000000 -0400
62546+++ linux-3.0.4/kernel/time/timekeeping.c 2011-08-23 21:48:14.000000000 -0400
62547@@ -14,6 +14,7 @@
62548 #include <linux/init.h>
62549 #include <linux/mm.h>
62550 #include <linux/sched.h>
62551+#include <linux/grsecurity.h>
62552 #include <linux/syscore_ops.h>
62553 #include <linux/clocksource.h>
62554 #include <linux/jiffies.h>
62555@@ -361,6 +362,8 @@ int do_settimeofday(const struct timespe
62556 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
62557 return -EINVAL;
62558
62559+ gr_log_timechange();
62560+
62561 write_seqlock_irqsave(&xtime_lock, flags);
62562
62563 timekeeping_forward_now();
62564diff -urNp linux-3.0.4/kernel/time/timer_list.c linux-3.0.4/kernel/time/timer_list.c
62565--- linux-3.0.4/kernel/time/timer_list.c 2011-07-21 22:17:23.000000000 -0400
62566+++ linux-3.0.4/kernel/time/timer_list.c 2011-08-23 21:48:14.000000000 -0400
62567@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
62568
62569 static void print_name_offset(struct seq_file *m, void *sym)
62570 {
62571+#ifdef CONFIG_GRKERNSEC_HIDESYM
62572+ SEQ_printf(m, "<%p>", NULL);
62573+#else
62574 char symname[KSYM_NAME_LEN];
62575
62576 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
62577 SEQ_printf(m, "<%pK>", sym);
62578 else
62579 SEQ_printf(m, "%s", symname);
62580+#endif
62581 }
62582
62583 static void
62584@@ -112,7 +116,11 @@ next_one:
62585 static void
62586 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
62587 {
62588+#ifdef CONFIG_GRKERNSEC_HIDESYM
62589+ SEQ_printf(m, " .base: %p\n", NULL);
62590+#else
62591 SEQ_printf(m, " .base: %pK\n", base);
62592+#endif
62593 SEQ_printf(m, " .index: %d\n",
62594 base->index);
62595 SEQ_printf(m, " .resolution: %Lu nsecs\n",
62596@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs
62597 {
62598 struct proc_dir_entry *pe;
62599
62600+#ifdef CONFIG_GRKERNSEC_PROC_ADD
62601+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
62602+#else
62603 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
62604+#endif
62605 if (!pe)
62606 return -ENOMEM;
62607 return 0;
62608diff -urNp linux-3.0.4/kernel/time/timer_stats.c linux-3.0.4/kernel/time/timer_stats.c
62609--- linux-3.0.4/kernel/time/timer_stats.c 2011-07-21 22:17:23.000000000 -0400
62610+++ linux-3.0.4/kernel/time/timer_stats.c 2011-08-23 21:48:14.000000000 -0400
62611@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
62612 static unsigned long nr_entries;
62613 static struct entry entries[MAX_ENTRIES];
62614
62615-static atomic_t overflow_count;
62616+static atomic_unchecked_t overflow_count;
62617
62618 /*
62619 * The entries are in a hash-table, for fast lookup:
62620@@ -140,7 +140,7 @@ static void reset_entries(void)
62621 nr_entries = 0;
62622 memset(entries, 0, sizeof(entries));
62623 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
62624- atomic_set(&overflow_count, 0);
62625+ atomic_set_unchecked(&overflow_count, 0);
62626 }
62627
62628 static struct entry *alloc_entry(void)
62629@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
62630 if (likely(entry))
62631 entry->count++;
62632 else
62633- atomic_inc(&overflow_count);
62634+ atomic_inc_unchecked(&overflow_count);
62635
62636 out_unlock:
62637 raw_spin_unlock_irqrestore(lock, flags);
62638@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
62639
62640 static void print_name_offset(struct seq_file *m, unsigned long addr)
62641 {
62642+#ifdef CONFIG_GRKERNSEC_HIDESYM
62643+ seq_printf(m, "<%p>", NULL);
62644+#else
62645 char symname[KSYM_NAME_LEN];
62646
62647 if (lookup_symbol_name(addr, symname) < 0)
62648 seq_printf(m, "<%p>", (void *)addr);
62649 else
62650 seq_printf(m, "%s", symname);
62651+#endif
62652 }
62653
62654 static int tstats_show(struct seq_file *m, void *v)
62655@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
62656
62657 seq_puts(m, "Timer Stats Version: v0.2\n");
62658 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
62659- if (atomic_read(&overflow_count))
62660+ if (atomic_read_unchecked(&overflow_count))
62661 seq_printf(m, "Overflow: %d entries\n",
62662- atomic_read(&overflow_count));
62663+ atomic_read_unchecked(&overflow_count));
62664
62665 for (i = 0; i < nr_entries; i++) {
62666 entry = entries + i;
62667@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi
62668 {
62669 struct proc_dir_entry *pe;
62670
62671+#ifdef CONFIG_GRKERNSEC_PROC_ADD
62672+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
62673+#else
62674 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
62675+#endif
62676 if (!pe)
62677 return -ENOMEM;
62678 return 0;
62679diff -urNp linux-3.0.4/kernel/time.c linux-3.0.4/kernel/time.c
62680--- linux-3.0.4/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
62681+++ linux-3.0.4/kernel/time.c 2011-08-23 21:48:14.000000000 -0400
62682@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct tim
62683 return error;
62684
62685 if (tz) {
62686+ /* we log in do_settimeofday called below, so don't log twice
62687+ */
62688+ if (!tv)
62689+ gr_log_timechange();
62690+
62691 /* SMP safe, global irq locking makes it work. */
62692 sys_tz = *tz;
62693 update_vsyscall_tz();
62694diff -urNp linux-3.0.4/kernel/timer.c linux-3.0.4/kernel/timer.c
62695--- linux-3.0.4/kernel/timer.c 2011-07-21 22:17:23.000000000 -0400
62696+++ linux-3.0.4/kernel/timer.c 2011-08-23 21:47:56.000000000 -0400
62697@@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
62698 /*
62699 * This function runs timers and the timer-tq in bottom half context.
62700 */
62701-static void run_timer_softirq(struct softirq_action *h)
62702+static void run_timer_softirq(void)
62703 {
62704 struct tvec_base *base = __this_cpu_read(tvec_bases);
62705
62706diff -urNp linux-3.0.4/kernel/trace/blktrace.c linux-3.0.4/kernel/trace/blktrace.c
62707--- linux-3.0.4/kernel/trace/blktrace.c 2011-07-21 22:17:23.000000000 -0400
62708+++ linux-3.0.4/kernel/trace/blktrace.c 2011-08-23 21:47:56.000000000 -0400
62709@@ -321,7 +321,7 @@ static ssize_t blk_dropped_read(struct f
62710 struct blk_trace *bt = filp->private_data;
62711 char buf[16];
62712
62713- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
62714+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
62715
62716 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
62717 }
62718@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(str
62719 return 1;
62720
62721 bt = buf->chan->private_data;
62722- atomic_inc(&bt->dropped);
62723+ atomic_inc_unchecked(&bt->dropped);
62724 return 0;
62725 }
62726
62727@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_qu
62728
62729 bt->dir = dir;
62730 bt->dev = dev;
62731- atomic_set(&bt->dropped, 0);
62732+ atomic_set_unchecked(&bt->dropped, 0);
62733
62734 ret = -EIO;
62735 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
62736diff -urNp linux-3.0.4/kernel/trace/ftrace.c linux-3.0.4/kernel/trace/ftrace.c
62737--- linux-3.0.4/kernel/trace/ftrace.c 2011-07-21 22:17:23.000000000 -0400
62738+++ linux-3.0.4/kernel/trace/ftrace.c 2011-08-23 21:47:56.000000000 -0400
62739@@ -1566,12 +1566,17 @@ ftrace_code_disable(struct module *mod,
62740 if (unlikely(ftrace_disabled))
62741 return 0;
62742
62743+ ret = ftrace_arch_code_modify_prepare();
62744+ FTRACE_WARN_ON(ret);
62745+ if (ret)
62746+ return 0;
62747+
62748 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
62749+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
62750 if (ret) {
62751 ftrace_bug(ret, ip);
62752- return 0;
62753 }
62754- return 1;
62755+ return ret ? 0 : 1;
62756 }
62757
62758 /*
62759@@ -2550,7 +2555,7 @@ static void ftrace_free_entry_rcu(struct
62760
62761 int
62762 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
62763- void *data)
62764+ void *data)
62765 {
62766 struct ftrace_func_probe *entry;
62767 struct ftrace_page *pg;
62768diff -urNp linux-3.0.4/kernel/trace/trace.c linux-3.0.4/kernel/trace/trace.c
62769--- linux-3.0.4/kernel/trace/trace.c 2011-07-21 22:17:23.000000000 -0400
62770+++ linux-3.0.4/kernel/trace/trace.c 2011-08-23 21:48:14.000000000 -0400
62771@@ -3339,6 +3339,8 @@ static ssize_t tracing_splice_read_pipe(
62772 size_t rem;
62773 unsigned int i;
62774
62775+ pax_track_stack();
62776+
62777 if (splice_grow_spd(pipe, &spd))
62778 return -ENOMEM;
62779
62780@@ -3822,6 +3824,8 @@ tracing_buffers_splice_read(struct file
62781 int entries, size, i;
62782 size_t ret;
62783
62784+ pax_track_stack();
62785+
62786 if (splice_grow_spd(pipe, &spd))
62787 return -ENOMEM;
62788
62789@@ -3990,10 +3994,9 @@ static const struct file_operations trac
62790 };
62791 #endif
62792
62793-static struct dentry *d_tracer;
62794-
62795 struct dentry *tracing_init_dentry(void)
62796 {
62797+ static struct dentry *d_tracer;
62798 static int once;
62799
62800 if (d_tracer)
62801@@ -4013,10 +4016,9 @@ struct dentry *tracing_init_dentry(void)
62802 return d_tracer;
62803 }
62804
62805-static struct dentry *d_percpu;
62806-
62807 struct dentry *tracing_dentry_percpu(void)
62808 {
62809+ static struct dentry *d_percpu;
62810 static int once;
62811 struct dentry *d_tracer;
62812
62813diff -urNp linux-3.0.4/kernel/trace/trace_events.c linux-3.0.4/kernel/trace/trace_events.c
62814--- linux-3.0.4/kernel/trace/trace_events.c 2011-08-23 21:44:40.000000000 -0400
62815+++ linux-3.0.4/kernel/trace/trace_events.c 2011-08-23 21:47:56.000000000 -0400
62816@@ -1318,10 +1318,6 @@ static LIST_HEAD(ftrace_module_file_list
62817 struct ftrace_module_file_ops {
62818 struct list_head list;
62819 struct module *mod;
62820- struct file_operations id;
62821- struct file_operations enable;
62822- struct file_operations format;
62823- struct file_operations filter;
62824 };
62825
62826 static struct ftrace_module_file_ops *
62827@@ -1342,17 +1338,12 @@ trace_create_file_ops(struct module *mod
62828
62829 file_ops->mod = mod;
62830
62831- file_ops->id = ftrace_event_id_fops;
62832- file_ops->id.owner = mod;
62833-
62834- file_ops->enable = ftrace_enable_fops;
62835- file_ops->enable.owner = mod;
62836-
62837- file_ops->filter = ftrace_event_filter_fops;
62838- file_ops->filter.owner = mod;
62839-
62840- file_ops->format = ftrace_event_format_fops;
62841- file_ops->format.owner = mod;
62842+ pax_open_kernel();
62843+ *(void **)&mod->trace_id.owner = mod;
62844+ *(void **)&mod->trace_enable.owner = mod;
62845+ *(void **)&mod->trace_filter.owner = mod;
62846+ *(void **)&mod->trace_format.owner = mod;
62847+ pax_close_kernel();
62848
62849 list_add(&file_ops->list, &ftrace_module_file_list);
62850
62851@@ -1376,8 +1367,8 @@ static void trace_module_add_events(stru
62852
62853 for_each_event(call, start, end) {
62854 __trace_add_event_call(*call, mod,
62855- &file_ops->id, &file_ops->enable,
62856- &file_ops->filter, &file_ops->format);
62857+ &mod->trace_id, &mod->trace_enable,
62858+ &mod->trace_filter, &mod->trace_format);
62859 }
62860 }
62861
62862diff -urNp linux-3.0.4/kernel/trace/trace_mmiotrace.c linux-3.0.4/kernel/trace/trace_mmiotrace.c
62863--- linux-3.0.4/kernel/trace/trace_mmiotrace.c 2011-07-21 22:17:23.000000000 -0400
62864+++ linux-3.0.4/kernel/trace/trace_mmiotrace.c 2011-08-23 21:47:56.000000000 -0400
62865@@ -24,7 +24,7 @@ struct header_iter {
62866 static struct trace_array *mmio_trace_array;
62867 static bool overrun_detected;
62868 static unsigned long prev_overruns;
62869-static atomic_t dropped_count;
62870+static atomic_unchecked_t dropped_count;
62871
62872 static void mmio_reset_data(struct trace_array *tr)
62873 {
62874@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iter
62875
62876 static unsigned long count_overruns(struct trace_iterator *iter)
62877 {
62878- unsigned long cnt = atomic_xchg(&dropped_count, 0);
62879+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
62880 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
62881
62882 if (over > prev_overruns)
62883@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct
62884 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
62885 sizeof(*entry), 0, pc);
62886 if (!event) {
62887- atomic_inc(&dropped_count);
62888+ atomic_inc_unchecked(&dropped_count);
62889 return;
62890 }
62891 entry = ring_buffer_event_data(event);
62892@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct
62893 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
62894 sizeof(*entry), 0, pc);
62895 if (!event) {
62896- atomic_inc(&dropped_count);
62897+ atomic_inc_unchecked(&dropped_count);
62898 return;
62899 }
62900 entry = ring_buffer_event_data(event);
62901diff -urNp linux-3.0.4/kernel/trace/trace_output.c linux-3.0.4/kernel/trace/trace_output.c
62902--- linux-3.0.4/kernel/trace/trace_output.c 2011-07-21 22:17:23.000000000 -0400
62903+++ linux-3.0.4/kernel/trace/trace_output.c 2011-08-23 21:47:56.000000000 -0400
62904@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s,
62905
62906 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
62907 if (!IS_ERR(p)) {
62908- p = mangle_path(s->buffer + s->len, p, "\n");
62909+ p = mangle_path(s->buffer + s->len, p, "\n\\");
62910 if (p) {
62911 s->len = p - s->buffer;
62912 return 1;
62913diff -urNp linux-3.0.4/kernel/trace/trace_stack.c linux-3.0.4/kernel/trace/trace_stack.c
62914--- linux-3.0.4/kernel/trace/trace_stack.c 2011-07-21 22:17:23.000000000 -0400
62915+++ linux-3.0.4/kernel/trace/trace_stack.c 2011-08-23 21:47:56.000000000 -0400
62916@@ -50,7 +50,7 @@ static inline void check_stack(void)
62917 return;
62918
62919 /* we do not handle interrupt stacks yet */
62920- if (!object_is_on_stack(&this_size))
62921+ if (!object_starts_on_stack(&this_size))
62922 return;
62923
62924 local_irq_save(flags);
62925diff -urNp linux-3.0.4/kernel/trace/trace_workqueue.c linux-3.0.4/kernel/trace/trace_workqueue.c
62926--- linux-3.0.4/kernel/trace/trace_workqueue.c 2011-07-21 22:17:23.000000000 -0400
62927+++ linux-3.0.4/kernel/trace/trace_workqueue.c 2011-08-23 21:47:56.000000000 -0400
62928@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
62929 int cpu;
62930 pid_t pid;
62931 /* Can be inserted from interrupt or user context, need to be atomic */
62932- atomic_t inserted;
62933+ atomic_unchecked_t inserted;
62934 /*
62935 * Don't need to be atomic, works are serialized in a single workqueue thread
62936 * on a single CPU.
62937@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
62938 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
62939 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
62940 if (node->pid == wq_thread->pid) {
62941- atomic_inc(&node->inserted);
62942+ atomic_inc_unchecked(&node->inserted);
62943 goto found;
62944 }
62945 }
62946@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct se
62947 tsk = get_pid_task(pid, PIDTYPE_PID);
62948 if (tsk) {
62949 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
62950- atomic_read(&cws->inserted), cws->executed,
62951+ atomic_read_unchecked(&cws->inserted), cws->executed,
62952 tsk->comm);
62953 put_task_struct(tsk);
62954 }
62955diff -urNp linux-3.0.4/lib/bug.c linux-3.0.4/lib/bug.c
62956--- linux-3.0.4/lib/bug.c 2011-07-21 22:17:23.000000000 -0400
62957+++ linux-3.0.4/lib/bug.c 2011-08-23 21:47:56.000000000 -0400
62958@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned l
62959 return BUG_TRAP_TYPE_NONE;
62960
62961 bug = find_bug(bugaddr);
62962+ if (!bug)
62963+ return BUG_TRAP_TYPE_NONE;
62964
62965 file = NULL;
62966 line = 0;
62967diff -urNp linux-3.0.4/lib/debugobjects.c linux-3.0.4/lib/debugobjects.c
62968--- linux-3.0.4/lib/debugobjects.c 2011-07-21 22:17:23.000000000 -0400
62969+++ linux-3.0.4/lib/debugobjects.c 2011-08-23 21:47:56.000000000 -0400
62970@@ -284,7 +284,7 @@ static void debug_object_is_on_stack(voi
62971 if (limit > 4)
62972 return;
62973
62974- is_on_stack = object_is_on_stack(addr);
62975+ is_on_stack = object_starts_on_stack(addr);
62976 if (is_on_stack == onstack)
62977 return;
62978
62979diff -urNp linux-3.0.4/lib/dma-debug.c linux-3.0.4/lib/dma-debug.c
62980--- linux-3.0.4/lib/dma-debug.c 2011-07-21 22:17:23.000000000 -0400
62981+++ linux-3.0.4/lib/dma-debug.c 2011-08-23 21:47:56.000000000 -0400
62982@@ -870,7 +870,7 @@ out:
62983
62984 static void check_for_stack(struct device *dev, void *addr)
62985 {
62986- if (object_is_on_stack(addr))
62987+ if (object_starts_on_stack(addr))
62988 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
62989 "stack [addr=%p]\n", addr);
62990 }
62991diff -urNp linux-3.0.4/lib/extable.c linux-3.0.4/lib/extable.c
62992--- linux-3.0.4/lib/extable.c 2011-07-21 22:17:23.000000000 -0400
62993+++ linux-3.0.4/lib/extable.c 2011-08-23 21:47:56.000000000 -0400
62994@@ -13,6 +13,7 @@
62995 #include <linux/init.h>
62996 #include <linux/sort.h>
62997 #include <asm/uaccess.h>
62998+#include <asm/pgtable.h>
62999
63000 #ifndef ARCH_HAS_SORT_EXTABLE
63001 /*
63002@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const v
63003 void sort_extable(struct exception_table_entry *start,
63004 struct exception_table_entry *finish)
63005 {
63006+ pax_open_kernel();
63007 sort(start, finish - start, sizeof(struct exception_table_entry),
63008 cmp_ex, NULL);
63009+ pax_close_kernel();
63010 }
63011
63012 #ifdef CONFIG_MODULES
63013diff -urNp linux-3.0.4/lib/inflate.c linux-3.0.4/lib/inflate.c
63014--- linux-3.0.4/lib/inflate.c 2011-07-21 22:17:23.000000000 -0400
63015+++ linux-3.0.4/lib/inflate.c 2011-08-23 21:47:56.000000000 -0400
63016@@ -269,7 +269,7 @@ static void free(void *where)
63017 malloc_ptr = free_mem_ptr;
63018 }
63019 #else
63020-#define malloc(a) kmalloc(a, GFP_KERNEL)
63021+#define malloc(a) kmalloc((a), GFP_KERNEL)
63022 #define free(a) kfree(a)
63023 #endif
63024
63025diff -urNp linux-3.0.4/lib/Kconfig.debug linux-3.0.4/lib/Kconfig.debug
63026--- linux-3.0.4/lib/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
63027+++ linux-3.0.4/lib/Kconfig.debug 2011-08-23 21:48:14.000000000 -0400
63028@@ -1088,6 +1088,7 @@ config LATENCYTOP
63029 depends on DEBUG_KERNEL
63030 depends on STACKTRACE_SUPPORT
63031 depends on PROC_FS
63032+ depends on !GRKERNSEC_HIDESYM
63033 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
63034 select KALLSYMS
63035 select KALLSYMS_ALL
63036diff -urNp linux-3.0.4/lib/kref.c linux-3.0.4/lib/kref.c
63037--- linux-3.0.4/lib/kref.c 2011-07-21 22:17:23.000000000 -0400
63038+++ linux-3.0.4/lib/kref.c 2011-08-23 21:47:56.000000000 -0400
63039@@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
63040 */
63041 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
63042 {
63043- WARN_ON(release == NULL);
63044+ BUG_ON(release == NULL);
63045 WARN_ON(release == (void (*)(struct kref *))kfree);
63046
63047 if (atomic_dec_and_test(&kref->refcount)) {
63048diff -urNp linux-3.0.4/lib/radix-tree.c linux-3.0.4/lib/radix-tree.c
63049--- linux-3.0.4/lib/radix-tree.c 2011-07-21 22:17:23.000000000 -0400
63050+++ linux-3.0.4/lib/radix-tree.c 2011-08-23 21:47:56.000000000 -0400
63051@@ -80,7 +80,7 @@ struct radix_tree_preload {
63052 int nr;
63053 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
63054 };
63055-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
63056+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
63057
63058 static inline void *ptr_to_indirect(void *ptr)
63059 {
63060diff -urNp linux-3.0.4/lib/vsprintf.c linux-3.0.4/lib/vsprintf.c
63061--- linux-3.0.4/lib/vsprintf.c 2011-07-21 22:17:23.000000000 -0400
63062+++ linux-3.0.4/lib/vsprintf.c 2011-08-23 21:48:14.000000000 -0400
63063@@ -16,6 +16,9 @@
63064 * - scnprintf and vscnprintf
63065 */
63066
63067+#ifdef CONFIG_GRKERNSEC_HIDESYM
63068+#define __INCLUDED_BY_HIDESYM 1
63069+#endif
63070 #include <stdarg.h>
63071 #include <linux/module.h>
63072 #include <linux/types.h>
63073@@ -435,7 +438,7 @@ char *symbol_string(char *buf, char *end
63074 char sym[KSYM_SYMBOL_LEN];
63075 if (ext == 'B')
63076 sprint_backtrace(sym, value);
63077- else if (ext != 'f' && ext != 's')
63078+ else if (ext != 'f' && ext != 's' && ext != 'a')
63079 sprint_symbol(sym, value);
63080 else
63081 kallsyms_lookup(value, NULL, NULL, NULL, sym);
63082@@ -799,7 +802,11 @@ char *uuid_string(char *buf, char *end,
63083 return string(buf, end, uuid, spec);
63084 }
63085
63086+#ifdef CONFIG_GRKERNSEC_HIDESYM
63087+int kptr_restrict __read_mostly = 2;
63088+#else
63089 int kptr_restrict __read_mostly;
63090+#endif
63091
63092 /*
63093 * Show a '%p' thing. A kernel extension is that the '%p' is followed
63094@@ -813,6 +820,8 @@ int kptr_restrict __read_mostly;
63095 * - 'S' For symbolic direct pointers with offset
63096 * - 's' For symbolic direct pointers without offset
63097 * - 'B' For backtraced symbolic direct pointers with offset
63098+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
63099+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
63100 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
63101 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
63102 * - 'M' For a 6-byte MAC address, it prints the address in the
63103@@ -857,12 +866,12 @@ char *pointer(const char *fmt, char *buf
63104 {
63105 if (!ptr && *fmt != 'K') {
63106 /*
63107- * Print (null) with the same width as a pointer so it makes
63108+ * Print (nil) with the same width as a pointer so it makes
63109 * tabular output look nice.
63110 */
63111 if (spec.field_width == -1)
63112 spec.field_width = 2 * sizeof(void *);
63113- return string(buf, end, "(null)", spec);
63114+ return string(buf, end, "(nil)", spec);
63115 }
63116
63117 switch (*fmt) {
63118@@ -872,6 +881,13 @@ char *pointer(const char *fmt, char *buf
63119 /* Fallthrough */
63120 case 'S':
63121 case 's':
63122+#ifdef CONFIG_GRKERNSEC_HIDESYM
63123+ break;
63124+#else
63125+ return symbol_string(buf, end, ptr, spec, *fmt);
63126+#endif
63127+ case 'A':
63128+ case 'a':
63129 case 'B':
63130 return symbol_string(buf, end, ptr, spec, *fmt);
63131 case 'R':
63132@@ -1631,11 +1647,11 @@ int bstr_printf(char *buf, size_t size,
63133 typeof(type) value; \
63134 if (sizeof(type) == 8) { \
63135 args = PTR_ALIGN(args, sizeof(u32)); \
63136- *(u32 *)&value = *(u32 *)args; \
63137- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
63138+ *(u32 *)&value = *(const u32 *)args; \
63139+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
63140 } else { \
63141 args = PTR_ALIGN(args, sizeof(type)); \
63142- value = *(typeof(type) *)args; \
63143+ value = *(const typeof(type) *)args; \
63144 } \
63145 args += sizeof(type); \
63146 value; \
63147@@ -1698,7 +1714,7 @@ int bstr_printf(char *buf, size_t size,
63148 case FORMAT_TYPE_STR: {
63149 const char *str_arg = args;
63150 args += strlen(str_arg) + 1;
63151- str = string(str, end, (char *)str_arg, spec);
63152+ str = string(str, end, str_arg, spec);
63153 break;
63154 }
63155
63156diff -urNp linux-3.0.4/localversion-grsec linux-3.0.4/localversion-grsec
63157--- linux-3.0.4/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
63158+++ linux-3.0.4/localversion-grsec 2011-08-23 21:48:14.000000000 -0400
63159@@ -0,0 +1 @@
63160+-grsec
63161diff -urNp linux-3.0.4/Makefile linux-3.0.4/Makefile
63162--- linux-3.0.4/Makefile 2011-08-29 23:26:13.000000000 -0400
63163+++ linux-3.0.4/Makefile 2011-08-30 18:24:49.000000000 -0400
63164@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
63165
63166 HOSTCC = gcc
63167 HOSTCXX = g++
63168-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
63169-HOSTCXXFLAGS = -O2
63170+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
63171+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
63172+HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
63173
63174 # Decide whether to build built-in, modular, or both.
63175 # Normally, just do built-in.
63176@@ -365,10 +366,12 @@ LINUXINCLUDE := -I$(srctree)/arch/$(h
63177 KBUILD_CPPFLAGS := -D__KERNEL__
63178
63179 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
63180+ -W -Wno-unused-parameter -Wno-missing-field-initializers \
63181 -fno-strict-aliasing -fno-common \
63182 -Werror-implicit-function-declaration \
63183 -Wno-format-security \
63184 -fno-delete-null-pointer-checks
63185+KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
63186 KBUILD_AFLAGS_KERNEL :=
63187 KBUILD_CFLAGS_KERNEL :=
63188 KBUILD_AFLAGS := -D__ASSEMBLY__
63189@@ -564,6 +567,24 @@ else
63190 KBUILD_CFLAGS += -O2
63191 endif
63192
63193+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh $(HOSTCC)), y)
63194+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
63195+ifdef CONFIG_PAX_MEMORY_STACKLEAK
63196+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
63197+endif
63198+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN
63199+gcc-plugins: prepare
63200+ $(Q)$(MAKE) $(build)=tools/gcc
63201+else
63202+gcc-plugins:
63203+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
63204+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.))
63205+else
63206+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
63207+endif
63208+ $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
63209+endif
63210+
63211 include $(srctree)/arch/$(SRCARCH)/Makefile
63212
63213 ifneq ($(CONFIG_FRAME_WARN),0)
63214@@ -708,7 +729,7 @@ export mod_strip_cmd
63215
63216
63217 ifeq ($(KBUILD_EXTMOD),)
63218-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
63219+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
63220
63221 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
63222 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
63223@@ -907,6 +928,8 @@ define rule_vmlinux-modpost
63224 endef
63225
63226 # vmlinux image - including updated kernel symbols
63227+$(vmlinux-all): KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
63228+$(vmlinux-all): gcc-plugins
63229 vmlinux: $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o $(kallsyms.o) FORCE
63230 ifdef CONFIG_HEADERS_CHECK
63231 $(Q)$(MAKE) -f $(srctree)/Makefile headers_check
63232@@ -932,7 +955,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-mai
63233
63234 # The actual objects are generated when descending,
63235 # make sure no implicit rule kicks in
63236-$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
63237+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): prepare scripts $(vmlinux-dirs) ;
63238
63239 # Handle descending into subdirectories listed in $(vmlinux-dirs)
63240 # Preset locale variables to speed up the build process. Limit locale
63241@@ -941,7 +964,7 @@ $(sort $(vmlinux-init) $(vmlinux-main))
63242 # Error messages still appears in the original language
63243
63244 PHONY += $(vmlinux-dirs)
63245-$(vmlinux-dirs): prepare scripts
63246+$(vmlinux-dirs): gcc-plugins
63247 $(Q)$(MAKE) $(build)=$@
63248
63249 # Store (new) KERNELRELASE string in include/config/kernel.release
63250@@ -1087,7 +1110,8 @@ all: modules
63251 # using awk while concatenating to the final file.
63252
63253 PHONY += modules
63254-modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
63255+$(vmlinux-dirs): KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
63256+modules: prepare scripts $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
63257 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
63258 @$(kecho) ' Building modules, stage 2.';
63259 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
63260@@ -1102,7 +1126,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modu
63261
63262 # Target to prepare building external modules
63263 PHONY += modules_prepare
63264-modules_prepare: prepare scripts
63265+modules_prepare: gcc-plugins prepare scripts
63266
63267 # Target to install modules
63268 PHONY += modules_install
63269@@ -1198,7 +1222,7 @@ distclean: mrproper
63270 @find $(srctree) $(RCS_FIND_IGNORE) \
63271 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
63272 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
63273- -o -name '.*.rej' -o -size 0 \
63274+ -o -name '.*.rej' -o -name '*.so' -o -size 0 \
63275 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
63276 -type f -print | xargs rm -f
63277
63278@@ -1359,6 +1383,7 @@ PHONY += $(module-dirs) modules
63279 $(module-dirs): crmodverdir $(objtree)/Module.symvers
63280 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
63281
63282+modules: KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
63283 modules: $(module-dirs)
63284 @$(kecho) ' Building modules, stage 2.';
63285 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
63286@@ -1489,13 +1514,14 @@ endif
63287 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
63288 %.i: %.c prepare scripts FORCE
63289 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
63290-%.o: %.c prepare scripts FORCE
63291+%.o: KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
63292+%.o: %.c gcc-plugins prepare scripts FORCE
63293 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
63294 %.lst: %.c prepare scripts FORCE
63295 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
63296 %.s: %.S prepare scripts FORCE
63297 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
63298-%.o: %.S prepare scripts FORCE
63299+%.o: %.S gcc-plugins prepare scripts FORCE
63300 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
63301 %.symtypes: %.c prepare scripts FORCE
63302 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
63303diff -urNp linux-3.0.4/mm/filemap.c linux-3.0.4/mm/filemap.c
63304--- linux-3.0.4/mm/filemap.c 2011-07-21 22:17:23.000000000 -0400
63305+++ linux-3.0.4/mm/filemap.c 2011-08-23 21:48:14.000000000 -0400
63306@@ -1763,7 +1763,7 @@ int generic_file_mmap(struct file * file
63307 struct address_space *mapping = file->f_mapping;
63308
63309 if (!mapping->a_ops->readpage)
63310- return -ENOEXEC;
63311+ return -ENODEV;
63312 file_accessed(file);
63313 vma->vm_ops = &generic_file_vm_ops;
63314 vma->vm_flags |= VM_CAN_NONLINEAR;
63315@@ -2169,6 +2169,7 @@ inline int generic_write_checks(struct f
63316 *pos = i_size_read(inode);
63317
63318 if (limit != RLIM_INFINITY) {
63319+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
63320 if (*pos >= limit) {
63321 send_sig(SIGXFSZ, current, 0);
63322 return -EFBIG;
63323diff -urNp linux-3.0.4/mm/fremap.c linux-3.0.4/mm/fremap.c
63324--- linux-3.0.4/mm/fremap.c 2011-07-21 22:17:23.000000000 -0400
63325+++ linux-3.0.4/mm/fremap.c 2011-08-23 21:47:56.000000000 -0400
63326@@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
63327 retry:
63328 vma = find_vma(mm, start);
63329
63330+#ifdef CONFIG_PAX_SEGMEXEC
63331+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
63332+ goto out;
63333+#endif
63334+
63335 /*
63336 * Make sure the vma is shared, that it supports prefaulting,
63337 * and that the remapped range is valid and fully within
63338diff -urNp linux-3.0.4/mm/highmem.c linux-3.0.4/mm/highmem.c
63339--- linux-3.0.4/mm/highmem.c 2011-07-21 22:17:23.000000000 -0400
63340+++ linux-3.0.4/mm/highmem.c 2011-08-23 21:47:56.000000000 -0400
63341@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
63342 * So no dangers, even with speculative execution.
63343 */
63344 page = pte_page(pkmap_page_table[i]);
63345+ pax_open_kernel();
63346 pte_clear(&init_mm, (unsigned long)page_address(page),
63347 &pkmap_page_table[i]);
63348-
63349+ pax_close_kernel();
63350 set_page_address(page, NULL);
63351 need_flush = 1;
63352 }
63353@@ -186,9 +187,11 @@ start:
63354 }
63355 }
63356 vaddr = PKMAP_ADDR(last_pkmap_nr);
63357+
63358+ pax_open_kernel();
63359 set_pte_at(&init_mm, vaddr,
63360 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
63361-
63362+ pax_close_kernel();
63363 pkmap_count[last_pkmap_nr] = 1;
63364 set_page_address(page, (void *)vaddr);
63365
63366diff -urNp linux-3.0.4/mm/huge_memory.c linux-3.0.4/mm/huge_memory.c
63367--- linux-3.0.4/mm/huge_memory.c 2011-07-21 22:17:23.000000000 -0400
63368+++ linux-3.0.4/mm/huge_memory.c 2011-08-23 21:47:56.000000000 -0400
63369@@ -702,7 +702,7 @@ out:
63370 * run pte_offset_map on the pmd, if an huge pmd could
63371 * materialize from under us from a different thread.
63372 */
63373- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
63374+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
63375 return VM_FAULT_OOM;
63376 /* if an huge pmd materialized from under us just retry later */
63377 if (unlikely(pmd_trans_huge(*pmd)))
63378diff -urNp linux-3.0.4/mm/hugetlb.c linux-3.0.4/mm/hugetlb.c
63379--- linux-3.0.4/mm/hugetlb.c 2011-07-21 22:17:23.000000000 -0400
63380+++ linux-3.0.4/mm/hugetlb.c 2011-08-23 21:47:56.000000000 -0400
63381@@ -2339,6 +2339,27 @@ static int unmap_ref_private(struct mm_s
63382 return 1;
63383 }
63384
63385+#ifdef CONFIG_PAX_SEGMEXEC
63386+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
63387+{
63388+ struct mm_struct *mm = vma->vm_mm;
63389+ struct vm_area_struct *vma_m;
63390+ unsigned long address_m;
63391+ pte_t *ptep_m;
63392+
63393+ vma_m = pax_find_mirror_vma(vma);
63394+ if (!vma_m)
63395+ return;
63396+
63397+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63398+ address_m = address + SEGMEXEC_TASK_SIZE;
63399+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
63400+ get_page(page_m);
63401+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
63402+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
63403+}
63404+#endif
63405+
63406 /*
63407 * Hugetlb_cow() should be called with page lock of the original hugepage held.
63408 */
63409@@ -2440,6 +2461,11 @@ retry_avoidcopy:
63410 make_huge_pte(vma, new_page, 1));
63411 page_remove_rmap(old_page);
63412 hugepage_add_new_anon_rmap(new_page, vma, address);
63413+
63414+#ifdef CONFIG_PAX_SEGMEXEC
63415+ pax_mirror_huge_pte(vma, address, new_page);
63416+#endif
63417+
63418 /* Make the old page be freed below */
63419 new_page = old_page;
63420 mmu_notifier_invalidate_range_end(mm,
63421@@ -2591,6 +2617,10 @@ retry:
63422 && (vma->vm_flags & VM_SHARED)));
63423 set_huge_pte_at(mm, address, ptep, new_pte);
63424
63425+#ifdef CONFIG_PAX_SEGMEXEC
63426+ pax_mirror_huge_pte(vma, address, page);
63427+#endif
63428+
63429 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
63430 /* Optimization, do the COW without a second fault */
63431 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
63432@@ -2620,6 +2650,10 @@ int hugetlb_fault(struct mm_struct *mm,
63433 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
63434 struct hstate *h = hstate_vma(vma);
63435
63436+#ifdef CONFIG_PAX_SEGMEXEC
63437+ struct vm_area_struct *vma_m;
63438+#endif
63439+
63440 ptep = huge_pte_offset(mm, address);
63441 if (ptep) {
63442 entry = huge_ptep_get(ptep);
63443@@ -2631,6 +2665,26 @@ int hugetlb_fault(struct mm_struct *mm,
63444 VM_FAULT_SET_HINDEX(h - hstates);
63445 }
63446
63447+#ifdef CONFIG_PAX_SEGMEXEC
63448+ vma_m = pax_find_mirror_vma(vma);
63449+ if (vma_m) {
63450+ unsigned long address_m;
63451+
63452+ if (vma->vm_start > vma_m->vm_start) {
63453+ address_m = address;
63454+ address -= SEGMEXEC_TASK_SIZE;
63455+ vma = vma_m;
63456+ h = hstate_vma(vma);
63457+ } else
63458+ address_m = address + SEGMEXEC_TASK_SIZE;
63459+
63460+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
63461+ return VM_FAULT_OOM;
63462+ address_m &= HPAGE_MASK;
63463+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
63464+ }
63465+#endif
63466+
63467 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
63468 if (!ptep)
63469 return VM_FAULT_OOM;
63470diff -urNp linux-3.0.4/mm/internal.h linux-3.0.4/mm/internal.h
63471--- linux-3.0.4/mm/internal.h 2011-07-21 22:17:23.000000000 -0400
63472+++ linux-3.0.4/mm/internal.h 2011-08-23 21:47:56.000000000 -0400
63473@@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
63474 * in mm/page_alloc.c
63475 */
63476 extern void __free_pages_bootmem(struct page *page, unsigned int order);
63477+extern void free_compound_page(struct page *page);
63478 extern void prep_compound_page(struct page *page, unsigned long order);
63479 #ifdef CONFIG_MEMORY_FAILURE
63480 extern bool is_free_buddy_page(struct page *page);
63481diff -urNp linux-3.0.4/mm/Kconfig linux-3.0.4/mm/Kconfig
63482--- linux-3.0.4/mm/Kconfig 2011-07-21 22:17:23.000000000 -0400
63483+++ linux-3.0.4/mm/Kconfig 2011-08-23 21:48:14.000000000 -0400
63484@@ -240,7 +240,7 @@ config KSM
63485 config DEFAULT_MMAP_MIN_ADDR
63486 int "Low address space to protect from user allocation"
63487 depends on MMU
63488- default 4096
63489+ default 65536
63490 help
63491 This is the portion of low virtual memory which should be protected
63492 from userspace allocation. Keeping a user from writing to low pages
63493diff -urNp linux-3.0.4/mm/kmemleak.c linux-3.0.4/mm/kmemleak.c
63494--- linux-3.0.4/mm/kmemleak.c 2011-07-21 22:17:23.000000000 -0400
63495+++ linux-3.0.4/mm/kmemleak.c 2011-08-23 21:48:14.000000000 -0400
63496@@ -357,7 +357,7 @@ static void print_unreferenced(struct se
63497
63498 for (i = 0; i < object->trace_len; i++) {
63499 void *ptr = (void *)object->trace[i];
63500- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
63501+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
63502 }
63503 }
63504
63505diff -urNp linux-3.0.4/mm/madvise.c linux-3.0.4/mm/madvise.c
63506--- linux-3.0.4/mm/madvise.c 2011-07-21 22:17:23.000000000 -0400
63507+++ linux-3.0.4/mm/madvise.c 2011-08-23 21:47:56.000000000 -0400
63508@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
63509 pgoff_t pgoff;
63510 unsigned long new_flags = vma->vm_flags;
63511
63512+#ifdef CONFIG_PAX_SEGMEXEC
63513+ struct vm_area_struct *vma_m;
63514+#endif
63515+
63516 switch (behavior) {
63517 case MADV_NORMAL:
63518 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
63519@@ -110,6 +114,13 @@ success:
63520 /*
63521 * vm_flags is protected by the mmap_sem held in write mode.
63522 */
63523+
63524+#ifdef CONFIG_PAX_SEGMEXEC
63525+ vma_m = pax_find_mirror_vma(vma);
63526+ if (vma_m)
63527+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
63528+#endif
63529+
63530 vma->vm_flags = new_flags;
63531
63532 out:
63533@@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_a
63534 struct vm_area_struct ** prev,
63535 unsigned long start, unsigned long end)
63536 {
63537+
63538+#ifdef CONFIG_PAX_SEGMEXEC
63539+ struct vm_area_struct *vma_m;
63540+#endif
63541+
63542 *prev = vma;
63543 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
63544 return -EINVAL;
63545@@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_a
63546 zap_page_range(vma, start, end - start, &details);
63547 } else
63548 zap_page_range(vma, start, end - start, NULL);
63549+
63550+#ifdef CONFIG_PAX_SEGMEXEC
63551+ vma_m = pax_find_mirror_vma(vma);
63552+ if (vma_m) {
63553+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
63554+ struct zap_details details = {
63555+ .nonlinear_vma = vma_m,
63556+ .last_index = ULONG_MAX,
63557+ };
63558+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
63559+ } else
63560+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
63561+ }
63562+#endif
63563+
63564 return 0;
63565 }
63566
63567@@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
63568 if (end < start)
63569 goto out;
63570
63571+#ifdef CONFIG_PAX_SEGMEXEC
63572+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
63573+ if (end > SEGMEXEC_TASK_SIZE)
63574+ goto out;
63575+ } else
63576+#endif
63577+
63578+ if (end > TASK_SIZE)
63579+ goto out;
63580+
63581 error = 0;
63582 if (end == start)
63583 goto out;
63584diff -urNp linux-3.0.4/mm/memory.c linux-3.0.4/mm/memory.c
63585--- linux-3.0.4/mm/memory.c 2011-08-23 21:44:40.000000000 -0400
63586+++ linux-3.0.4/mm/memory.c 2011-08-23 21:47:56.000000000 -0400
63587@@ -457,8 +457,12 @@ static inline void free_pmd_range(struct
63588 return;
63589
63590 pmd = pmd_offset(pud, start);
63591+
63592+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
63593 pud_clear(pud);
63594 pmd_free_tlb(tlb, pmd, start);
63595+#endif
63596+
63597 }
63598
63599 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
63600@@ -489,9 +493,12 @@ static inline void free_pud_range(struct
63601 if (end - 1 > ceiling - 1)
63602 return;
63603
63604+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
63605 pud = pud_offset(pgd, start);
63606 pgd_clear(pgd);
63607 pud_free_tlb(tlb, pud, start);
63608+#endif
63609+
63610 }
63611
63612 /*
63613@@ -1577,12 +1584,6 @@ no_page_table:
63614 return page;
63615 }
63616
63617-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
63618-{
63619- return stack_guard_page_start(vma, addr) ||
63620- stack_guard_page_end(vma, addr+PAGE_SIZE);
63621-}
63622-
63623 /**
63624 * __get_user_pages() - pin user pages in memory
63625 * @tsk: task_struct of target task
63626@@ -1655,10 +1656,10 @@ int __get_user_pages(struct task_struct
63627 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
63628 i = 0;
63629
63630- do {
63631+ while (nr_pages) {
63632 struct vm_area_struct *vma;
63633
63634- vma = find_extend_vma(mm, start);
63635+ vma = find_vma(mm, start);
63636 if (!vma && in_gate_area(mm, start)) {
63637 unsigned long pg = start & PAGE_MASK;
63638 pgd_t *pgd;
63639@@ -1706,7 +1707,7 @@ int __get_user_pages(struct task_struct
63640 goto next_page;
63641 }
63642
63643- if (!vma ||
63644+ if (!vma || start < vma->vm_start ||
63645 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
63646 !(vm_flags & vma->vm_flags))
63647 return i ? : -EFAULT;
63648@@ -1733,11 +1734,6 @@ int __get_user_pages(struct task_struct
63649 int ret;
63650 unsigned int fault_flags = 0;
63651
63652- /* For mlock, just skip the stack guard page. */
63653- if (foll_flags & FOLL_MLOCK) {
63654- if (stack_guard_page(vma, start))
63655- goto next_page;
63656- }
63657 if (foll_flags & FOLL_WRITE)
63658 fault_flags |= FAULT_FLAG_WRITE;
63659 if (nonblocking)
63660@@ -1811,7 +1807,7 @@ next_page:
63661 start += PAGE_SIZE;
63662 nr_pages--;
63663 } while (nr_pages && start < vma->vm_end);
63664- } while (nr_pages);
63665+ }
63666 return i;
63667 }
63668 EXPORT_SYMBOL(__get_user_pages);
63669@@ -2018,6 +2014,10 @@ static int insert_page(struct vm_area_st
63670 page_add_file_rmap(page);
63671 set_pte_at(mm, addr, pte, mk_pte(page, prot));
63672
63673+#ifdef CONFIG_PAX_SEGMEXEC
63674+ pax_mirror_file_pte(vma, addr, page, ptl);
63675+#endif
63676+
63677 retval = 0;
63678 pte_unmap_unlock(pte, ptl);
63679 return retval;
63680@@ -2052,10 +2052,22 @@ out:
63681 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
63682 struct page *page)
63683 {
63684+
63685+#ifdef CONFIG_PAX_SEGMEXEC
63686+ struct vm_area_struct *vma_m;
63687+#endif
63688+
63689 if (addr < vma->vm_start || addr >= vma->vm_end)
63690 return -EFAULT;
63691 if (!page_count(page))
63692 return -EINVAL;
63693+
63694+#ifdef CONFIG_PAX_SEGMEXEC
63695+ vma_m = pax_find_mirror_vma(vma);
63696+ if (vma_m)
63697+ vma_m->vm_flags |= VM_INSERTPAGE;
63698+#endif
63699+
63700 vma->vm_flags |= VM_INSERTPAGE;
63701 return insert_page(vma, addr, page, vma->vm_page_prot);
63702 }
63703@@ -2141,6 +2153,7 @@ int vm_insert_mixed(struct vm_area_struc
63704 unsigned long pfn)
63705 {
63706 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
63707+ BUG_ON(vma->vm_mirror);
63708
63709 if (addr < vma->vm_start || addr >= vma->vm_end)
63710 return -EFAULT;
63711@@ -2456,6 +2469,186 @@ static inline void cow_user_page(struct
63712 copy_user_highpage(dst, src, va, vma);
63713 }
63714
63715+#ifdef CONFIG_PAX_SEGMEXEC
63716+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
63717+{
63718+ struct mm_struct *mm = vma->vm_mm;
63719+ spinlock_t *ptl;
63720+ pte_t *pte, entry;
63721+
63722+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
63723+ entry = *pte;
63724+ if (!pte_present(entry)) {
63725+ if (!pte_none(entry)) {
63726+ BUG_ON(pte_file(entry));
63727+ free_swap_and_cache(pte_to_swp_entry(entry));
63728+ pte_clear_not_present_full(mm, address, pte, 0);
63729+ }
63730+ } else {
63731+ struct page *page;
63732+
63733+ flush_cache_page(vma, address, pte_pfn(entry));
63734+ entry = ptep_clear_flush(vma, address, pte);
63735+ BUG_ON(pte_dirty(entry));
63736+ page = vm_normal_page(vma, address, entry);
63737+ if (page) {
63738+ update_hiwater_rss(mm);
63739+ if (PageAnon(page))
63740+ dec_mm_counter_fast(mm, MM_ANONPAGES);
63741+ else
63742+ dec_mm_counter_fast(mm, MM_FILEPAGES);
63743+ page_remove_rmap(page);
63744+ page_cache_release(page);
63745+ }
63746+ }
63747+ pte_unmap_unlock(pte, ptl);
63748+}
63749+
63750+/* PaX: if vma is mirrored, synchronize the mirror's PTE
63751+ *
63752+ * the ptl of the lower mapped page is held on entry and is not released on exit
63753+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
63754+ */
63755+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
63756+{
63757+ struct mm_struct *mm = vma->vm_mm;
63758+ unsigned long address_m;
63759+ spinlock_t *ptl_m;
63760+ struct vm_area_struct *vma_m;
63761+ pmd_t *pmd_m;
63762+ pte_t *pte_m, entry_m;
63763+
63764+ BUG_ON(!page_m || !PageAnon(page_m));
63765+
63766+ vma_m = pax_find_mirror_vma(vma);
63767+ if (!vma_m)
63768+ return;
63769+
63770+ BUG_ON(!PageLocked(page_m));
63771+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63772+ address_m = address + SEGMEXEC_TASK_SIZE;
63773+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
63774+ pte_m = pte_offset_map(pmd_m, address_m);
63775+ ptl_m = pte_lockptr(mm, pmd_m);
63776+ if (ptl != ptl_m) {
63777+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
63778+ if (!pte_none(*pte_m))
63779+ goto out;
63780+ }
63781+
63782+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
63783+ page_cache_get(page_m);
63784+ page_add_anon_rmap(page_m, vma_m, address_m);
63785+ inc_mm_counter_fast(mm, MM_ANONPAGES);
63786+ set_pte_at(mm, address_m, pte_m, entry_m);
63787+ update_mmu_cache(vma_m, address_m, entry_m);
63788+out:
63789+ if (ptl != ptl_m)
63790+ spin_unlock(ptl_m);
63791+ pte_unmap(pte_m);
63792+ unlock_page(page_m);
63793+}
63794+
63795+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
63796+{
63797+ struct mm_struct *mm = vma->vm_mm;
63798+ unsigned long address_m;
63799+ spinlock_t *ptl_m;
63800+ struct vm_area_struct *vma_m;
63801+ pmd_t *pmd_m;
63802+ pte_t *pte_m, entry_m;
63803+
63804+ BUG_ON(!page_m || PageAnon(page_m));
63805+
63806+ vma_m = pax_find_mirror_vma(vma);
63807+ if (!vma_m)
63808+ return;
63809+
63810+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63811+ address_m = address + SEGMEXEC_TASK_SIZE;
63812+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
63813+ pte_m = pte_offset_map(pmd_m, address_m);
63814+ ptl_m = pte_lockptr(mm, pmd_m);
63815+ if (ptl != ptl_m) {
63816+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
63817+ if (!pte_none(*pte_m))
63818+ goto out;
63819+ }
63820+
63821+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
63822+ page_cache_get(page_m);
63823+ page_add_file_rmap(page_m);
63824+ inc_mm_counter_fast(mm, MM_FILEPAGES);
63825+ set_pte_at(mm, address_m, pte_m, entry_m);
63826+ update_mmu_cache(vma_m, address_m, entry_m);
63827+out:
63828+ if (ptl != ptl_m)
63829+ spin_unlock(ptl_m);
63830+ pte_unmap(pte_m);
63831+}
63832+
63833+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
63834+{
63835+ struct mm_struct *mm = vma->vm_mm;
63836+ unsigned long address_m;
63837+ spinlock_t *ptl_m;
63838+ struct vm_area_struct *vma_m;
63839+ pmd_t *pmd_m;
63840+ pte_t *pte_m, entry_m;
63841+
63842+ vma_m = pax_find_mirror_vma(vma);
63843+ if (!vma_m)
63844+ return;
63845+
63846+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63847+ address_m = address + SEGMEXEC_TASK_SIZE;
63848+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
63849+ pte_m = pte_offset_map(pmd_m, address_m);
63850+ ptl_m = pte_lockptr(mm, pmd_m);
63851+ if (ptl != ptl_m) {
63852+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
63853+ if (!pte_none(*pte_m))
63854+ goto out;
63855+ }
63856+
63857+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
63858+ set_pte_at(mm, address_m, pte_m, entry_m);
63859+out:
63860+ if (ptl != ptl_m)
63861+ spin_unlock(ptl_m);
63862+ pte_unmap(pte_m);
63863+}
63864+
63865+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
63866+{
63867+ struct page *page_m;
63868+ pte_t entry;
63869+
63870+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
63871+ goto out;
63872+
63873+ entry = *pte;
63874+ page_m = vm_normal_page(vma, address, entry);
63875+ if (!page_m)
63876+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
63877+ else if (PageAnon(page_m)) {
63878+ if (pax_find_mirror_vma(vma)) {
63879+ pte_unmap_unlock(pte, ptl);
63880+ lock_page(page_m);
63881+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
63882+ if (pte_same(entry, *pte))
63883+ pax_mirror_anon_pte(vma, address, page_m, ptl);
63884+ else
63885+ unlock_page(page_m);
63886+ }
63887+ } else
63888+ pax_mirror_file_pte(vma, address, page_m, ptl);
63889+
63890+out:
63891+ pte_unmap_unlock(pte, ptl);
63892+}
63893+#endif
63894+
63895 /*
63896 * This routine handles present pages, when users try to write
63897 * to a shared page. It is done by copying the page to a new address
63898@@ -2667,6 +2860,12 @@ gotten:
63899 */
63900 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
63901 if (likely(pte_same(*page_table, orig_pte))) {
63902+
63903+#ifdef CONFIG_PAX_SEGMEXEC
63904+ if (pax_find_mirror_vma(vma))
63905+ BUG_ON(!trylock_page(new_page));
63906+#endif
63907+
63908 if (old_page) {
63909 if (!PageAnon(old_page)) {
63910 dec_mm_counter_fast(mm, MM_FILEPAGES);
63911@@ -2718,6 +2917,10 @@ gotten:
63912 page_remove_rmap(old_page);
63913 }
63914
63915+#ifdef CONFIG_PAX_SEGMEXEC
63916+ pax_mirror_anon_pte(vma, address, new_page, ptl);
63917+#endif
63918+
63919 /* Free the old page.. */
63920 new_page = old_page;
63921 ret |= VM_FAULT_WRITE;
63922@@ -2997,6 +3200,11 @@ static int do_swap_page(struct mm_struct
63923 swap_free(entry);
63924 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
63925 try_to_free_swap(page);
63926+
63927+#ifdef CONFIG_PAX_SEGMEXEC
63928+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
63929+#endif
63930+
63931 unlock_page(page);
63932 if (swapcache) {
63933 /*
63934@@ -3020,6 +3228,11 @@ static int do_swap_page(struct mm_struct
63935
63936 /* No need to invalidate - it was non-present before */
63937 update_mmu_cache(vma, address, page_table);
63938+
63939+#ifdef CONFIG_PAX_SEGMEXEC
63940+ pax_mirror_anon_pte(vma, address, page, ptl);
63941+#endif
63942+
63943 unlock:
63944 pte_unmap_unlock(page_table, ptl);
63945 out:
63946@@ -3039,40 +3252,6 @@ out_release:
63947 }
63948
63949 /*
63950- * This is like a special single-page "expand_{down|up}wards()",
63951- * except we must first make sure that 'address{-|+}PAGE_SIZE'
63952- * doesn't hit another vma.
63953- */
63954-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
63955-{
63956- address &= PAGE_MASK;
63957- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
63958- struct vm_area_struct *prev = vma->vm_prev;
63959-
63960- /*
63961- * Is there a mapping abutting this one below?
63962- *
63963- * That's only ok if it's the same stack mapping
63964- * that has gotten split..
63965- */
63966- if (prev && prev->vm_end == address)
63967- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
63968-
63969- expand_downwards(vma, address - PAGE_SIZE);
63970- }
63971- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
63972- struct vm_area_struct *next = vma->vm_next;
63973-
63974- /* As VM_GROWSDOWN but s/below/above/ */
63975- if (next && next->vm_start == address + PAGE_SIZE)
63976- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
63977-
63978- expand_upwards(vma, address + PAGE_SIZE);
63979- }
63980- return 0;
63981-}
63982-
63983-/*
63984 * We enter with non-exclusive mmap_sem (to exclude vma changes,
63985 * but allow concurrent faults), and pte mapped but not yet locked.
63986 * We return with mmap_sem still held, but pte unmapped and unlocked.
63987@@ -3081,27 +3260,23 @@ static int do_anonymous_page(struct mm_s
63988 unsigned long address, pte_t *page_table, pmd_t *pmd,
63989 unsigned int flags)
63990 {
63991- struct page *page;
63992+ struct page *page = NULL;
63993 spinlock_t *ptl;
63994 pte_t entry;
63995
63996- pte_unmap(page_table);
63997-
63998- /* Check if we need to add a guard page to the stack */
63999- if (check_stack_guard_page(vma, address) < 0)
64000- return VM_FAULT_SIGBUS;
64001-
64002- /* Use the zero-page for reads */
64003 if (!(flags & FAULT_FLAG_WRITE)) {
64004 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
64005 vma->vm_page_prot));
64006- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
64007+ ptl = pte_lockptr(mm, pmd);
64008+ spin_lock(ptl);
64009 if (!pte_none(*page_table))
64010 goto unlock;
64011 goto setpte;
64012 }
64013
64014 /* Allocate our own private page. */
64015+ pte_unmap(page_table);
64016+
64017 if (unlikely(anon_vma_prepare(vma)))
64018 goto oom;
64019 page = alloc_zeroed_user_highpage_movable(vma, address);
64020@@ -3120,6 +3295,11 @@ static int do_anonymous_page(struct mm_s
64021 if (!pte_none(*page_table))
64022 goto release;
64023
64024+#ifdef CONFIG_PAX_SEGMEXEC
64025+ if (pax_find_mirror_vma(vma))
64026+ BUG_ON(!trylock_page(page));
64027+#endif
64028+
64029 inc_mm_counter_fast(mm, MM_ANONPAGES);
64030 page_add_new_anon_rmap(page, vma, address);
64031 setpte:
64032@@ -3127,6 +3307,12 @@ setpte:
64033
64034 /* No need to invalidate - it was non-present before */
64035 update_mmu_cache(vma, address, page_table);
64036+
64037+#ifdef CONFIG_PAX_SEGMEXEC
64038+ if (page)
64039+ pax_mirror_anon_pte(vma, address, page, ptl);
64040+#endif
64041+
64042 unlock:
64043 pte_unmap_unlock(page_table, ptl);
64044 return 0;
64045@@ -3264,6 +3450,12 @@ static int __do_fault(struct mm_struct *
64046 */
64047 /* Only go through if we didn't race with anybody else... */
64048 if (likely(pte_same(*page_table, orig_pte))) {
64049+
64050+#ifdef CONFIG_PAX_SEGMEXEC
64051+ if (anon && pax_find_mirror_vma(vma))
64052+ BUG_ON(!trylock_page(page));
64053+#endif
64054+
64055 flush_icache_page(vma, page);
64056 entry = mk_pte(page, vma->vm_page_prot);
64057 if (flags & FAULT_FLAG_WRITE)
64058@@ -3283,6 +3475,14 @@ static int __do_fault(struct mm_struct *
64059
64060 /* no need to invalidate: a not-present page won't be cached */
64061 update_mmu_cache(vma, address, page_table);
64062+
64063+#ifdef CONFIG_PAX_SEGMEXEC
64064+ if (anon)
64065+ pax_mirror_anon_pte(vma, address, page, ptl);
64066+ else
64067+ pax_mirror_file_pte(vma, address, page, ptl);
64068+#endif
64069+
64070 } else {
64071 if (charged)
64072 mem_cgroup_uncharge_page(page);
64073@@ -3430,6 +3630,12 @@ int handle_pte_fault(struct mm_struct *m
64074 if (flags & FAULT_FLAG_WRITE)
64075 flush_tlb_fix_spurious_fault(vma, address);
64076 }
64077+
64078+#ifdef CONFIG_PAX_SEGMEXEC
64079+ pax_mirror_pte(vma, address, pte, pmd, ptl);
64080+ return 0;
64081+#endif
64082+
64083 unlock:
64084 pte_unmap_unlock(pte, ptl);
64085 return 0;
64086@@ -3446,6 +3652,10 @@ int handle_mm_fault(struct mm_struct *mm
64087 pmd_t *pmd;
64088 pte_t *pte;
64089
64090+#ifdef CONFIG_PAX_SEGMEXEC
64091+ struct vm_area_struct *vma_m;
64092+#endif
64093+
64094 __set_current_state(TASK_RUNNING);
64095
64096 count_vm_event(PGFAULT);
64097@@ -3457,6 +3667,34 @@ int handle_mm_fault(struct mm_struct *mm
64098 if (unlikely(is_vm_hugetlb_page(vma)))
64099 return hugetlb_fault(mm, vma, address, flags);
64100
64101+#ifdef CONFIG_PAX_SEGMEXEC
64102+ vma_m = pax_find_mirror_vma(vma);
64103+ if (vma_m) {
64104+ unsigned long address_m;
64105+ pgd_t *pgd_m;
64106+ pud_t *pud_m;
64107+ pmd_t *pmd_m;
64108+
64109+ if (vma->vm_start > vma_m->vm_start) {
64110+ address_m = address;
64111+ address -= SEGMEXEC_TASK_SIZE;
64112+ vma = vma_m;
64113+ } else
64114+ address_m = address + SEGMEXEC_TASK_SIZE;
64115+
64116+ pgd_m = pgd_offset(mm, address_m);
64117+ pud_m = pud_alloc(mm, pgd_m, address_m);
64118+ if (!pud_m)
64119+ return VM_FAULT_OOM;
64120+ pmd_m = pmd_alloc(mm, pud_m, address_m);
64121+ if (!pmd_m)
64122+ return VM_FAULT_OOM;
64123+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
64124+ return VM_FAULT_OOM;
64125+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
64126+ }
64127+#endif
64128+
64129 pgd = pgd_offset(mm, address);
64130 pud = pud_alloc(mm, pgd, address);
64131 if (!pud)
64132@@ -3486,7 +3724,7 @@ int handle_mm_fault(struct mm_struct *mm
64133 * run pte_offset_map on the pmd, if an huge pmd could
64134 * materialize from under us from a different thread.
64135 */
64136- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
64137+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
64138 return VM_FAULT_OOM;
64139 /* if an huge pmd materialized from under us just retry later */
64140 if (unlikely(pmd_trans_huge(*pmd)))
64141@@ -3590,7 +3828,7 @@ static int __init gate_vma_init(void)
64142 gate_vma.vm_start = FIXADDR_USER_START;
64143 gate_vma.vm_end = FIXADDR_USER_END;
64144 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
64145- gate_vma.vm_page_prot = __P101;
64146+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
64147 /*
64148 * Make sure the vDSO gets into every core dump.
64149 * Dumping its contents makes post-mortem fully interpretable later
64150diff -urNp linux-3.0.4/mm/memory-failure.c linux-3.0.4/mm/memory-failure.c
64151--- linux-3.0.4/mm/memory-failure.c 2011-07-21 22:17:23.000000000 -0400
64152+++ linux-3.0.4/mm/memory-failure.c 2011-08-23 21:47:56.000000000 -0400
64153@@ -59,7 +59,7 @@ int sysctl_memory_failure_early_kill __r
64154
64155 int sysctl_memory_failure_recovery __read_mostly = 1;
64156
64157-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
64158+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
64159
64160 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
64161
64162@@ -1008,7 +1008,7 @@ int __memory_failure(unsigned long pfn,
64163 }
64164
64165 nr_pages = 1 << compound_trans_order(hpage);
64166- atomic_long_add(nr_pages, &mce_bad_pages);
64167+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
64168
64169 /*
64170 * We need/can do nothing about count=0 pages.
64171@@ -1038,7 +1038,7 @@ int __memory_failure(unsigned long pfn,
64172 if (!PageHWPoison(hpage)
64173 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
64174 || (p != hpage && TestSetPageHWPoison(hpage))) {
64175- atomic_long_sub(nr_pages, &mce_bad_pages);
64176+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
64177 return 0;
64178 }
64179 set_page_hwpoison_huge_page(hpage);
64180@@ -1096,7 +1096,7 @@ int __memory_failure(unsigned long pfn,
64181 }
64182 if (hwpoison_filter(p)) {
64183 if (TestClearPageHWPoison(p))
64184- atomic_long_sub(nr_pages, &mce_bad_pages);
64185+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
64186 unlock_page(hpage);
64187 put_page(hpage);
64188 return 0;
64189@@ -1222,7 +1222,7 @@ int unpoison_memory(unsigned long pfn)
64190 return 0;
64191 }
64192 if (TestClearPageHWPoison(p))
64193- atomic_long_sub(nr_pages, &mce_bad_pages);
64194+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
64195 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
64196 return 0;
64197 }
64198@@ -1236,7 +1236,7 @@ int unpoison_memory(unsigned long pfn)
64199 */
64200 if (TestClearPageHWPoison(page)) {
64201 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
64202- atomic_long_sub(nr_pages, &mce_bad_pages);
64203+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
64204 freeit = 1;
64205 if (PageHuge(page))
64206 clear_page_hwpoison_huge_page(page);
64207@@ -1349,7 +1349,7 @@ static int soft_offline_huge_page(struct
64208 }
64209 done:
64210 if (!PageHWPoison(hpage))
64211- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
64212+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
64213 set_page_hwpoison_huge_page(hpage);
64214 dequeue_hwpoisoned_huge_page(hpage);
64215 /* keep elevated page count for bad page */
64216@@ -1480,7 +1480,7 @@ int soft_offline_page(struct page *page,
64217 return ret;
64218
64219 done:
64220- atomic_long_add(1, &mce_bad_pages);
64221+ atomic_long_add_unchecked(1, &mce_bad_pages);
64222 SetPageHWPoison(page);
64223 /* keep elevated page count for bad page */
64224 return ret;
64225diff -urNp linux-3.0.4/mm/mempolicy.c linux-3.0.4/mm/mempolicy.c
64226--- linux-3.0.4/mm/mempolicy.c 2011-07-21 22:17:23.000000000 -0400
64227+++ linux-3.0.4/mm/mempolicy.c 2011-08-23 21:48:14.000000000 -0400
64228@@ -639,6 +639,10 @@ static int mbind_range(struct mm_struct
64229 unsigned long vmstart;
64230 unsigned long vmend;
64231
64232+#ifdef CONFIG_PAX_SEGMEXEC
64233+ struct vm_area_struct *vma_m;
64234+#endif
64235+
64236 vma = find_vma_prev(mm, start, &prev);
64237 if (!vma || vma->vm_start > start)
64238 return -EFAULT;
64239@@ -669,6 +673,16 @@ static int mbind_range(struct mm_struct
64240 err = policy_vma(vma, new_pol);
64241 if (err)
64242 goto out;
64243+
64244+#ifdef CONFIG_PAX_SEGMEXEC
64245+ vma_m = pax_find_mirror_vma(vma);
64246+ if (vma_m) {
64247+ err = policy_vma(vma_m, new_pol);
64248+ if (err)
64249+ goto out;
64250+ }
64251+#endif
64252+
64253 }
64254
64255 out:
64256@@ -1102,6 +1116,17 @@ static long do_mbind(unsigned long start
64257
64258 if (end < start)
64259 return -EINVAL;
64260+
64261+#ifdef CONFIG_PAX_SEGMEXEC
64262+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
64263+ if (end > SEGMEXEC_TASK_SIZE)
64264+ return -EINVAL;
64265+ } else
64266+#endif
64267+
64268+ if (end > TASK_SIZE)
64269+ return -EINVAL;
64270+
64271 if (end == start)
64272 return 0;
64273
64274@@ -1320,6 +1345,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
64275 if (!mm)
64276 goto out;
64277
64278+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64279+ if (mm != current->mm &&
64280+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
64281+ err = -EPERM;
64282+ goto out;
64283+ }
64284+#endif
64285+
64286 /*
64287 * Check if this process has the right to modify the specified
64288 * process. The right exists if the process has administrative
64289@@ -1329,8 +1362,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
64290 rcu_read_lock();
64291 tcred = __task_cred(task);
64292 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
64293- cred->uid != tcred->suid && cred->uid != tcred->uid &&
64294- !capable(CAP_SYS_NICE)) {
64295+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
64296 rcu_read_unlock();
64297 err = -EPERM;
64298 goto out;
64299diff -urNp linux-3.0.4/mm/migrate.c linux-3.0.4/mm/migrate.c
64300--- linux-3.0.4/mm/migrate.c 2011-07-21 22:17:23.000000000 -0400
64301+++ linux-3.0.4/mm/migrate.c 2011-08-23 21:48:14.000000000 -0400
64302@@ -1124,6 +1124,8 @@ static int do_pages_move(struct mm_struc
64303 unsigned long chunk_start;
64304 int err;
64305
64306+ pax_track_stack();
64307+
64308 task_nodes = cpuset_mems_allowed(task);
64309
64310 err = -ENOMEM;
64311@@ -1308,6 +1310,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
64312 if (!mm)
64313 return -EINVAL;
64314
64315+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64316+ if (mm != current->mm &&
64317+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
64318+ err = -EPERM;
64319+ goto out;
64320+ }
64321+#endif
64322+
64323 /*
64324 * Check if this process has the right to modify the specified
64325 * process. The right exists if the process has administrative
64326@@ -1317,8 +1327,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
64327 rcu_read_lock();
64328 tcred = __task_cred(task);
64329 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
64330- cred->uid != tcred->suid && cred->uid != tcred->uid &&
64331- !capable(CAP_SYS_NICE)) {
64332+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
64333 rcu_read_unlock();
64334 err = -EPERM;
64335 goto out;
64336diff -urNp linux-3.0.4/mm/mlock.c linux-3.0.4/mm/mlock.c
64337--- linux-3.0.4/mm/mlock.c 2011-07-21 22:17:23.000000000 -0400
64338+++ linux-3.0.4/mm/mlock.c 2011-08-23 21:48:14.000000000 -0400
64339@@ -13,6 +13,7 @@
64340 #include <linux/pagemap.h>
64341 #include <linux/mempolicy.h>
64342 #include <linux/syscalls.h>
64343+#include <linux/security.h>
64344 #include <linux/sched.h>
64345 #include <linux/module.h>
64346 #include <linux/rmap.h>
64347@@ -377,6 +378,9 @@ static int do_mlock(unsigned long start,
64348 return -EINVAL;
64349 if (end == start)
64350 return 0;
64351+ if (end > TASK_SIZE)
64352+ return -EINVAL;
64353+
64354 vma = find_vma_prev(current->mm, start, &prev);
64355 if (!vma || vma->vm_start > start)
64356 return -ENOMEM;
64357@@ -387,6 +391,11 @@ static int do_mlock(unsigned long start,
64358 for (nstart = start ; ; ) {
64359 vm_flags_t newflags;
64360
64361+#ifdef CONFIG_PAX_SEGMEXEC
64362+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
64363+ break;
64364+#endif
64365+
64366 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
64367
64368 newflags = vma->vm_flags | VM_LOCKED;
64369@@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
64370 lock_limit >>= PAGE_SHIFT;
64371
64372 /* check against resource limits */
64373+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
64374 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
64375 error = do_mlock(start, len, 1);
64376 up_write(&current->mm->mmap_sem);
64377@@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
64378 static int do_mlockall(int flags)
64379 {
64380 struct vm_area_struct * vma, * prev = NULL;
64381- unsigned int def_flags = 0;
64382
64383 if (flags & MCL_FUTURE)
64384- def_flags = VM_LOCKED;
64385- current->mm->def_flags = def_flags;
64386+ current->mm->def_flags |= VM_LOCKED;
64387+ else
64388+ current->mm->def_flags &= ~VM_LOCKED;
64389 if (flags == MCL_FUTURE)
64390 goto out;
64391
64392 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
64393 vm_flags_t newflags;
64394
64395+#ifdef CONFIG_PAX_SEGMEXEC
64396+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
64397+ break;
64398+#endif
64399+
64400+ BUG_ON(vma->vm_end > TASK_SIZE);
64401 newflags = vma->vm_flags | VM_LOCKED;
64402 if (!(flags & MCL_CURRENT))
64403 newflags &= ~VM_LOCKED;
64404@@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
64405 lock_limit >>= PAGE_SHIFT;
64406
64407 ret = -ENOMEM;
64408+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
64409 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
64410 capable(CAP_IPC_LOCK))
64411 ret = do_mlockall(flags);
64412diff -urNp linux-3.0.4/mm/mmap.c linux-3.0.4/mm/mmap.c
64413--- linux-3.0.4/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
64414+++ linux-3.0.4/mm/mmap.c 2011-08-23 21:48:14.000000000 -0400
64415@@ -46,6 +46,16 @@
64416 #define arch_rebalance_pgtables(addr, len) (addr)
64417 #endif
64418
64419+static inline void verify_mm_writelocked(struct mm_struct *mm)
64420+{
64421+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
64422+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
64423+ up_read(&mm->mmap_sem);
64424+ BUG();
64425+ }
64426+#endif
64427+}
64428+
64429 static void unmap_region(struct mm_struct *mm,
64430 struct vm_area_struct *vma, struct vm_area_struct *prev,
64431 unsigned long start, unsigned long end);
64432@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struc
64433 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
64434 *
64435 */
64436-pgprot_t protection_map[16] = {
64437+pgprot_t protection_map[16] __read_only = {
64438 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
64439 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
64440 };
64441
64442-pgprot_t vm_get_page_prot(unsigned long vm_flags)
64443+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
64444 {
64445- return __pgprot(pgprot_val(protection_map[vm_flags &
64446+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
64447 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
64448 pgprot_val(arch_vm_get_page_prot(vm_flags)));
64449+
64450+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64451+ if (!(__supported_pte_mask & _PAGE_NX) &&
64452+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
64453+ (vm_flags & (VM_READ | VM_WRITE)))
64454+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
64455+#endif
64456+
64457+ return prot;
64458 }
64459 EXPORT_SYMBOL(vm_get_page_prot);
64460
64461 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
64462 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
64463 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
64464+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
64465 /*
64466 * Make sure vm_committed_as in one cacheline and not cacheline shared with
64467 * other variables. It can be updated by several CPUs frequently.
64468@@ -236,6 +256,7 @@ static struct vm_area_struct *remove_vma
64469 struct vm_area_struct *next = vma->vm_next;
64470
64471 might_sleep();
64472+ BUG_ON(vma->vm_mirror);
64473 if (vma->vm_ops && vma->vm_ops->close)
64474 vma->vm_ops->close(vma);
64475 if (vma->vm_file) {
64476@@ -280,6 +301,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
64477 * not page aligned -Ram Gupta
64478 */
64479 rlim = rlimit(RLIMIT_DATA);
64480+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
64481 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
64482 (mm->end_data - mm->start_data) > rlim)
64483 goto out;
64484@@ -697,6 +719,12 @@ static int
64485 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
64486 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
64487 {
64488+
64489+#ifdef CONFIG_PAX_SEGMEXEC
64490+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
64491+ return 0;
64492+#endif
64493+
64494 if (is_mergeable_vma(vma, file, vm_flags) &&
64495 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
64496 if (vma->vm_pgoff == vm_pgoff)
64497@@ -716,6 +744,12 @@ static int
64498 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
64499 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
64500 {
64501+
64502+#ifdef CONFIG_PAX_SEGMEXEC
64503+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
64504+ return 0;
64505+#endif
64506+
64507 if (is_mergeable_vma(vma, file, vm_flags) &&
64508 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
64509 pgoff_t vm_pglen;
64510@@ -758,13 +792,20 @@ can_vma_merge_after(struct vm_area_struc
64511 struct vm_area_struct *vma_merge(struct mm_struct *mm,
64512 struct vm_area_struct *prev, unsigned long addr,
64513 unsigned long end, unsigned long vm_flags,
64514- struct anon_vma *anon_vma, struct file *file,
64515+ struct anon_vma *anon_vma, struct file *file,
64516 pgoff_t pgoff, struct mempolicy *policy)
64517 {
64518 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
64519 struct vm_area_struct *area, *next;
64520 int err;
64521
64522+#ifdef CONFIG_PAX_SEGMEXEC
64523+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
64524+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
64525+
64526+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
64527+#endif
64528+
64529 /*
64530 * We later require that vma->vm_flags == vm_flags,
64531 * so this tests vma->vm_flags & VM_SPECIAL, too.
64532@@ -780,6 +821,15 @@ struct vm_area_struct *vma_merge(struct
64533 if (next && next->vm_end == end) /* cases 6, 7, 8 */
64534 next = next->vm_next;
64535
64536+#ifdef CONFIG_PAX_SEGMEXEC
64537+ if (prev)
64538+ prev_m = pax_find_mirror_vma(prev);
64539+ if (area)
64540+ area_m = pax_find_mirror_vma(area);
64541+ if (next)
64542+ next_m = pax_find_mirror_vma(next);
64543+#endif
64544+
64545 /*
64546 * Can it merge with the predecessor?
64547 */
64548@@ -799,9 +849,24 @@ struct vm_area_struct *vma_merge(struct
64549 /* cases 1, 6 */
64550 err = vma_adjust(prev, prev->vm_start,
64551 next->vm_end, prev->vm_pgoff, NULL);
64552- } else /* cases 2, 5, 7 */
64553+
64554+#ifdef CONFIG_PAX_SEGMEXEC
64555+ if (!err && prev_m)
64556+ err = vma_adjust(prev_m, prev_m->vm_start,
64557+ next_m->vm_end, prev_m->vm_pgoff, NULL);
64558+#endif
64559+
64560+ } else { /* cases 2, 5, 7 */
64561 err = vma_adjust(prev, prev->vm_start,
64562 end, prev->vm_pgoff, NULL);
64563+
64564+#ifdef CONFIG_PAX_SEGMEXEC
64565+ if (!err && prev_m)
64566+ err = vma_adjust(prev_m, prev_m->vm_start,
64567+ end_m, prev_m->vm_pgoff, NULL);
64568+#endif
64569+
64570+ }
64571 if (err)
64572 return NULL;
64573 khugepaged_enter_vma_merge(prev);
64574@@ -815,12 +880,27 @@ struct vm_area_struct *vma_merge(struct
64575 mpol_equal(policy, vma_policy(next)) &&
64576 can_vma_merge_before(next, vm_flags,
64577 anon_vma, file, pgoff+pglen)) {
64578- if (prev && addr < prev->vm_end) /* case 4 */
64579+ if (prev && addr < prev->vm_end) { /* case 4 */
64580 err = vma_adjust(prev, prev->vm_start,
64581 addr, prev->vm_pgoff, NULL);
64582- else /* cases 3, 8 */
64583+
64584+#ifdef CONFIG_PAX_SEGMEXEC
64585+ if (!err && prev_m)
64586+ err = vma_adjust(prev_m, prev_m->vm_start,
64587+ addr_m, prev_m->vm_pgoff, NULL);
64588+#endif
64589+
64590+ } else { /* cases 3, 8 */
64591 err = vma_adjust(area, addr, next->vm_end,
64592 next->vm_pgoff - pglen, NULL);
64593+
64594+#ifdef CONFIG_PAX_SEGMEXEC
64595+ if (!err && area_m)
64596+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
64597+ next_m->vm_pgoff - pglen, NULL);
64598+#endif
64599+
64600+ }
64601 if (err)
64602 return NULL;
64603 khugepaged_enter_vma_merge(area);
64604@@ -929,14 +1009,11 @@ none:
64605 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
64606 struct file *file, long pages)
64607 {
64608- const unsigned long stack_flags
64609- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
64610-
64611 if (file) {
64612 mm->shared_vm += pages;
64613 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
64614 mm->exec_vm += pages;
64615- } else if (flags & stack_flags)
64616+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
64617 mm->stack_vm += pages;
64618 if (flags & (VM_RESERVED|VM_IO))
64619 mm->reserved_vm += pages;
64620@@ -963,7 +1040,7 @@ unsigned long do_mmap_pgoff(struct file
64621 * (the exception is when the underlying filesystem is noexec
64622 * mounted, in which case we dont add PROT_EXEC.)
64623 */
64624- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
64625+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
64626 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
64627 prot |= PROT_EXEC;
64628
64629@@ -989,7 +1066,7 @@ unsigned long do_mmap_pgoff(struct file
64630 /* Obtain the address to map to. we verify (or select) it and ensure
64631 * that it represents a valid section of the address space.
64632 */
64633- addr = get_unmapped_area(file, addr, len, pgoff, flags);
64634+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
64635 if (addr & ~PAGE_MASK)
64636 return addr;
64637
64638@@ -1000,6 +1077,36 @@ unsigned long do_mmap_pgoff(struct file
64639 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
64640 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
64641
64642+#ifdef CONFIG_PAX_MPROTECT
64643+ if (mm->pax_flags & MF_PAX_MPROTECT) {
64644+#ifndef CONFIG_PAX_MPROTECT_COMPAT
64645+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
64646+ gr_log_rwxmmap(file);
64647+
64648+#ifdef CONFIG_PAX_EMUPLT
64649+ vm_flags &= ~VM_EXEC;
64650+#else
64651+ return -EPERM;
64652+#endif
64653+
64654+ }
64655+
64656+ if (!(vm_flags & VM_EXEC))
64657+ vm_flags &= ~VM_MAYEXEC;
64658+#else
64659+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
64660+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
64661+#endif
64662+ else
64663+ vm_flags &= ~VM_MAYWRITE;
64664+ }
64665+#endif
64666+
64667+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64668+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
64669+ vm_flags &= ~VM_PAGEEXEC;
64670+#endif
64671+
64672 if (flags & MAP_LOCKED)
64673 if (!can_do_mlock())
64674 return -EPERM;
64675@@ -1011,6 +1118,7 @@ unsigned long do_mmap_pgoff(struct file
64676 locked += mm->locked_vm;
64677 lock_limit = rlimit(RLIMIT_MEMLOCK);
64678 lock_limit >>= PAGE_SHIFT;
64679+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
64680 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
64681 return -EAGAIN;
64682 }
64683@@ -1081,6 +1189,9 @@ unsigned long do_mmap_pgoff(struct file
64684 if (error)
64685 return error;
64686
64687+ if (!gr_acl_handle_mmap(file, prot))
64688+ return -EACCES;
64689+
64690 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
64691 }
64692 EXPORT_SYMBOL(do_mmap_pgoff);
64693@@ -1161,7 +1272,7 @@ int vma_wants_writenotify(struct vm_area
64694 vm_flags_t vm_flags = vma->vm_flags;
64695
64696 /* If it was private or non-writable, the write bit is already clear */
64697- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
64698+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
64699 return 0;
64700
64701 /* The backer wishes to know when pages are first written to? */
64702@@ -1210,14 +1321,24 @@ unsigned long mmap_region(struct file *f
64703 unsigned long charged = 0;
64704 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
64705
64706+#ifdef CONFIG_PAX_SEGMEXEC
64707+ struct vm_area_struct *vma_m = NULL;
64708+#endif
64709+
64710+ /*
64711+ * mm->mmap_sem is required to protect against another thread
64712+ * changing the mappings in case we sleep.
64713+ */
64714+ verify_mm_writelocked(mm);
64715+
64716 /* Clear old maps */
64717 error = -ENOMEM;
64718-munmap_back:
64719 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
64720 if (vma && vma->vm_start < addr + len) {
64721 if (do_munmap(mm, addr, len))
64722 return -ENOMEM;
64723- goto munmap_back;
64724+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
64725+ BUG_ON(vma && vma->vm_start < addr + len);
64726 }
64727
64728 /* Check against address space limit. */
64729@@ -1266,6 +1387,16 @@ munmap_back:
64730 goto unacct_error;
64731 }
64732
64733+#ifdef CONFIG_PAX_SEGMEXEC
64734+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
64735+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
64736+ if (!vma_m) {
64737+ error = -ENOMEM;
64738+ goto free_vma;
64739+ }
64740+ }
64741+#endif
64742+
64743 vma->vm_mm = mm;
64744 vma->vm_start = addr;
64745 vma->vm_end = addr + len;
64746@@ -1289,6 +1420,19 @@ munmap_back:
64747 error = file->f_op->mmap(file, vma);
64748 if (error)
64749 goto unmap_and_free_vma;
64750+
64751+#ifdef CONFIG_PAX_SEGMEXEC
64752+ if (vma_m && (vm_flags & VM_EXECUTABLE))
64753+ added_exe_file_vma(mm);
64754+#endif
64755+
64756+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64757+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
64758+ vma->vm_flags |= VM_PAGEEXEC;
64759+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
64760+ }
64761+#endif
64762+
64763 if (vm_flags & VM_EXECUTABLE)
64764 added_exe_file_vma(mm);
64765
64766@@ -1324,6 +1468,11 @@ munmap_back:
64767 vma_link(mm, vma, prev, rb_link, rb_parent);
64768 file = vma->vm_file;
64769
64770+#ifdef CONFIG_PAX_SEGMEXEC
64771+ if (vma_m)
64772+ BUG_ON(pax_mirror_vma(vma_m, vma));
64773+#endif
64774+
64775 /* Once vma denies write, undo our temporary denial count */
64776 if (correct_wcount)
64777 atomic_inc(&inode->i_writecount);
64778@@ -1332,6 +1481,7 @@ out:
64779
64780 mm->total_vm += len >> PAGE_SHIFT;
64781 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
64782+ track_exec_limit(mm, addr, addr + len, vm_flags);
64783 if (vm_flags & VM_LOCKED) {
64784 if (!mlock_vma_pages_range(vma, addr, addr + len))
64785 mm->locked_vm += (len >> PAGE_SHIFT);
64786@@ -1349,6 +1499,12 @@ unmap_and_free_vma:
64787 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
64788 charged = 0;
64789 free_vma:
64790+
64791+#ifdef CONFIG_PAX_SEGMEXEC
64792+ if (vma_m)
64793+ kmem_cache_free(vm_area_cachep, vma_m);
64794+#endif
64795+
64796 kmem_cache_free(vm_area_cachep, vma);
64797 unacct_error:
64798 if (charged)
64799@@ -1356,6 +1512,44 @@ unacct_error:
64800 return error;
64801 }
64802
64803+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
64804+{
64805+ if (!vma) {
64806+#ifdef CONFIG_STACK_GROWSUP
64807+ if (addr > sysctl_heap_stack_gap)
64808+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
64809+ else
64810+ vma = find_vma(current->mm, 0);
64811+ if (vma && (vma->vm_flags & VM_GROWSUP))
64812+ return false;
64813+#endif
64814+ return true;
64815+ }
64816+
64817+ if (addr + len > vma->vm_start)
64818+ return false;
64819+
64820+ if (vma->vm_flags & VM_GROWSDOWN)
64821+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
64822+#ifdef CONFIG_STACK_GROWSUP
64823+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
64824+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
64825+#endif
64826+
64827+ return true;
64828+}
64829+
64830+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
64831+{
64832+ if (vma->vm_start < len)
64833+ return -ENOMEM;
64834+ if (!(vma->vm_flags & VM_GROWSDOWN))
64835+ return vma->vm_start - len;
64836+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
64837+ return vma->vm_start - len - sysctl_heap_stack_gap;
64838+ return -ENOMEM;
64839+}
64840+
64841 /* Get an address range which is currently unmapped.
64842 * For shmat() with addr=0.
64843 *
64844@@ -1382,18 +1576,23 @@ arch_get_unmapped_area(struct file *filp
64845 if (flags & MAP_FIXED)
64846 return addr;
64847
64848+#ifdef CONFIG_PAX_RANDMMAP
64849+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
64850+#endif
64851+
64852 if (addr) {
64853 addr = PAGE_ALIGN(addr);
64854- vma = find_vma(mm, addr);
64855- if (TASK_SIZE - len >= addr &&
64856- (!vma || addr + len <= vma->vm_start))
64857- return addr;
64858+ if (TASK_SIZE - len >= addr) {
64859+ vma = find_vma(mm, addr);
64860+ if (check_heap_stack_gap(vma, addr, len))
64861+ return addr;
64862+ }
64863 }
64864 if (len > mm->cached_hole_size) {
64865- start_addr = addr = mm->free_area_cache;
64866+ start_addr = addr = mm->free_area_cache;
64867 } else {
64868- start_addr = addr = TASK_UNMAPPED_BASE;
64869- mm->cached_hole_size = 0;
64870+ start_addr = addr = mm->mmap_base;
64871+ mm->cached_hole_size = 0;
64872 }
64873
64874 full_search:
64875@@ -1404,34 +1603,40 @@ full_search:
64876 * Start a new search - just in case we missed
64877 * some holes.
64878 */
64879- if (start_addr != TASK_UNMAPPED_BASE) {
64880- addr = TASK_UNMAPPED_BASE;
64881- start_addr = addr;
64882+ if (start_addr != mm->mmap_base) {
64883+ start_addr = addr = mm->mmap_base;
64884 mm->cached_hole_size = 0;
64885 goto full_search;
64886 }
64887 return -ENOMEM;
64888 }
64889- if (!vma || addr + len <= vma->vm_start) {
64890- /*
64891- * Remember the place where we stopped the search:
64892- */
64893- mm->free_area_cache = addr + len;
64894- return addr;
64895- }
64896+ if (check_heap_stack_gap(vma, addr, len))
64897+ break;
64898 if (addr + mm->cached_hole_size < vma->vm_start)
64899 mm->cached_hole_size = vma->vm_start - addr;
64900 addr = vma->vm_end;
64901 }
64902+
64903+ /*
64904+ * Remember the place where we stopped the search:
64905+ */
64906+ mm->free_area_cache = addr + len;
64907+ return addr;
64908 }
64909 #endif
64910
64911 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
64912 {
64913+
64914+#ifdef CONFIG_PAX_SEGMEXEC
64915+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
64916+ return;
64917+#endif
64918+
64919 /*
64920 * Is this a new hole at the lowest possible address?
64921 */
64922- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
64923+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
64924 mm->free_area_cache = addr;
64925 mm->cached_hole_size = ~0UL;
64926 }
64927@@ -1449,7 +1654,7 @@ arch_get_unmapped_area_topdown(struct fi
64928 {
64929 struct vm_area_struct *vma;
64930 struct mm_struct *mm = current->mm;
64931- unsigned long addr = addr0;
64932+ unsigned long base = mm->mmap_base, addr = addr0;
64933
64934 /* requested length too big for entire address space */
64935 if (len > TASK_SIZE)
64936@@ -1458,13 +1663,18 @@ arch_get_unmapped_area_topdown(struct fi
64937 if (flags & MAP_FIXED)
64938 return addr;
64939
64940+#ifdef CONFIG_PAX_RANDMMAP
64941+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
64942+#endif
64943+
64944 /* requesting a specific address */
64945 if (addr) {
64946 addr = PAGE_ALIGN(addr);
64947- vma = find_vma(mm, addr);
64948- if (TASK_SIZE - len >= addr &&
64949- (!vma || addr + len <= vma->vm_start))
64950- return addr;
64951+ if (TASK_SIZE - len >= addr) {
64952+ vma = find_vma(mm, addr);
64953+ if (check_heap_stack_gap(vma, addr, len))
64954+ return addr;
64955+ }
64956 }
64957
64958 /* check if free_area_cache is useful for us */
64959@@ -1479,7 +1689,7 @@ arch_get_unmapped_area_topdown(struct fi
64960 /* make sure it can fit in the remaining address space */
64961 if (addr > len) {
64962 vma = find_vma(mm, addr-len);
64963- if (!vma || addr <= vma->vm_start)
64964+ if (check_heap_stack_gap(vma, addr - len, len))
64965 /* remember the address as a hint for next time */
64966 return (mm->free_area_cache = addr-len);
64967 }
64968@@ -1496,7 +1706,7 @@ arch_get_unmapped_area_topdown(struct fi
64969 * return with success:
64970 */
64971 vma = find_vma(mm, addr);
64972- if (!vma || addr+len <= vma->vm_start)
64973+ if (check_heap_stack_gap(vma, addr, len))
64974 /* remember the address as a hint for next time */
64975 return (mm->free_area_cache = addr);
64976
64977@@ -1505,8 +1715,8 @@ arch_get_unmapped_area_topdown(struct fi
64978 mm->cached_hole_size = vma->vm_start - addr;
64979
64980 /* try just below the current vma->vm_start */
64981- addr = vma->vm_start-len;
64982- } while (len < vma->vm_start);
64983+ addr = skip_heap_stack_gap(vma, len);
64984+ } while (!IS_ERR_VALUE(addr));
64985
64986 bottomup:
64987 /*
64988@@ -1515,13 +1725,21 @@ bottomup:
64989 * can happen with large stack limits and large mmap()
64990 * allocations.
64991 */
64992+ mm->mmap_base = TASK_UNMAPPED_BASE;
64993+
64994+#ifdef CONFIG_PAX_RANDMMAP
64995+ if (mm->pax_flags & MF_PAX_RANDMMAP)
64996+ mm->mmap_base += mm->delta_mmap;
64997+#endif
64998+
64999+ mm->free_area_cache = mm->mmap_base;
65000 mm->cached_hole_size = ~0UL;
65001- mm->free_area_cache = TASK_UNMAPPED_BASE;
65002 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
65003 /*
65004 * Restore the topdown base:
65005 */
65006- mm->free_area_cache = mm->mmap_base;
65007+ mm->mmap_base = base;
65008+ mm->free_area_cache = base;
65009 mm->cached_hole_size = ~0UL;
65010
65011 return addr;
65012@@ -1530,6 +1748,12 @@ bottomup:
65013
65014 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
65015 {
65016+
65017+#ifdef CONFIG_PAX_SEGMEXEC
65018+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
65019+ return;
65020+#endif
65021+
65022 /*
65023 * Is this a new hole at the highest possible address?
65024 */
65025@@ -1537,8 +1761,10 @@ void arch_unmap_area_topdown(struct mm_s
65026 mm->free_area_cache = addr;
65027
65028 /* dont allow allocations above current base */
65029- if (mm->free_area_cache > mm->mmap_base)
65030+ if (mm->free_area_cache > mm->mmap_base) {
65031 mm->free_area_cache = mm->mmap_base;
65032+ mm->cached_hole_size = ~0UL;
65033+ }
65034 }
65035
65036 unsigned long
65037@@ -1646,6 +1872,28 @@ out:
65038 return prev ? prev->vm_next : vma;
65039 }
65040
65041+#ifdef CONFIG_PAX_SEGMEXEC
65042+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
65043+{
65044+ struct vm_area_struct *vma_m;
65045+
65046+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
65047+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
65048+ BUG_ON(vma->vm_mirror);
65049+ return NULL;
65050+ }
65051+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
65052+ vma_m = vma->vm_mirror;
65053+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
65054+ BUG_ON(vma->vm_file != vma_m->vm_file);
65055+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
65056+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
65057+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
65058+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
65059+ return vma_m;
65060+}
65061+#endif
65062+
65063 /*
65064 * Verify that the stack growth is acceptable and
65065 * update accounting. This is shared with both the
65066@@ -1662,6 +1910,7 @@ static int acct_stack_growth(struct vm_a
65067 return -ENOMEM;
65068
65069 /* Stack limit test */
65070+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
65071 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
65072 return -ENOMEM;
65073
65074@@ -1672,6 +1921,7 @@ static int acct_stack_growth(struct vm_a
65075 locked = mm->locked_vm + grow;
65076 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
65077 limit >>= PAGE_SHIFT;
65078+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
65079 if (locked > limit && !capable(CAP_IPC_LOCK))
65080 return -ENOMEM;
65081 }
65082@@ -1702,37 +1952,48 @@ static int acct_stack_growth(struct vm_a
65083 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
65084 * vma is the last one with address > vma->vm_end. Have to extend vma.
65085 */
65086+#ifndef CONFIG_IA64
65087+static
65088+#endif
65089 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
65090 {
65091 int error;
65092+ bool locknext;
65093
65094 if (!(vma->vm_flags & VM_GROWSUP))
65095 return -EFAULT;
65096
65097+ /* Also guard against wrapping around to address 0. */
65098+ if (address < PAGE_ALIGN(address+1))
65099+ address = PAGE_ALIGN(address+1);
65100+ else
65101+ return -ENOMEM;
65102+
65103 /*
65104 * We must make sure the anon_vma is allocated
65105 * so that the anon_vma locking is not a noop.
65106 */
65107 if (unlikely(anon_vma_prepare(vma)))
65108 return -ENOMEM;
65109+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
65110+ if (locknext && anon_vma_prepare(vma->vm_next))
65111+ return -ENOMEM;
65112 vma_lock_anon_vma(vma);
65113+ if (locknext)
65114+ vma_lock_anon_vma(vma->vm_next);
65115
65116 /*
65117 * vma->vm_start/vm_end cannot change under us because the caller
65118 * is required to hold the mmap_sem in read mode. We need the
65119- * anon_vma lock to serialize against concurrent expand_stacks.
65120- * Also guard against wrapping around to address 0.
65121+ * anon_vma locks to serialize against concurrent expand_stacks
65122+ * and expand_upwards.
65123 */
65124- if (address < PAGE_ALIGN(address+4))
65125- address = PAGE_ALIGN(address+4);
65126- else {
65127- vma_unlock_anon_vma(vma);
65128- return -ENOMEM;
65129- }
65130 error = 0;
65131
65132 /* Somebody else might have raced and expanded it already */
65133- if (address > vma->vm_end) {
65134+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
65135+ error = -ENOMEM;
65136+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
65137 unsigned long size, grow;
65138
65139 size = address - vma->vm_start;
65140@@ -1747,6 +2008,8 @@ int expand_upwards(struct vm_area_struct
65141 }
65142 }
65143 }
65144+ if (locknext)
65145+ vma_unlock_anon_vma(vma->vm_next);
65146 vma_unlock_anon_vma(vma);
65147 khugepaged_enter_vma_merge(vma);
65148 return error;
65149@@ -1760,6 +2023,8 @@ int expand_downwards(struct vm_area_stru
65150 unsigned long address)
65151 {
65152 int error;
65153+ bool lockprev = false;
65154+ struct vm_area_struct *prev;
65155
65156 /*
65157 * We must make sure the anon_vma is allocated
65158@@ -1773,6 +2038,15 @@ int expand_downwards(struct vm_area_stru
65159 if (error)
65160 return error;
65161
65162+ prev = vma->vm_prev;
65163+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
65164+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
65165+#endif
65166+ if (lockprev && anon_vma_prepare(prev))
65167+ return -ENOMEM;
65168+ if (lockprev)
65169+ vma_lock_anon_vma(prev);
65170+
65171 vma_lock_anon_vma(vma);
65172
65173 /*
65174@@ -1782,9 +2056,17 @@ int expand_downwards(struct vm_area_stru
65175 */
65176
65177 /* Somebody else might have raced and expanded it already */
65178- if (address < vma->vm_start) {
65179+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
65180+ error = -ENOMEM;
65181+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
65182 unsigned long size, grow;
65183
65184+#ifdef CONFIG_PAX_SEGMEXEC
65185+ struct vm_area_struct *vma_m;
65186+
65187+ vma_m = pax_find_mirror_vma(vma);
65188+#endif
65189+
65190 size = vma->vm_end - address;
65191 grow = (vma->vm_start - address) >> PAGE_SHIFT;
65192
65193@@ -1794,11 +2076,22 @@ int expand_downwards(struct vm_area_stru
65194 if (!error) {
65195 vma->vm_start = address;
65196 vma->vm_pgoff -= grow;
65197+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
65198+
65199+#ifdef CONFIG_PAX_SEGMEXEC
65200+ if (vma_m) {
65201+ vma_m->vm_start -= grow << PAGE_SHIFT;
65202+ vma_m->vm_pgoff -= grow;
65203+ }
65204+#endif
65205+
65206 perf_event_mmap(vma);
65207 }
65208 }
65209 }
65210 vma_unlock_anon_vma(vma);
65211+ if (lockprev)
65212+ vma_unlock_anon_vma(prev);
65213 khugepaged_enter_vma_merge(vma);
65214 return error;
65215 }
65216@@ -1868,6 +2161,13 @@ static void remove_vma_list(struct mm_st
65217 do {
65218 long nrpages = vma_pages(vma);
65219
65220+#ifdef CONFIG_PAX_SEGMEXEC
65221+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
65222+ vma = remove_vma(vma);
65223+ continue;
65224+ }
65225+#endif
65226+
65227 mm->total_vm -= nrpages;
65228 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
65229 vma = remove_vma(vma);
65230@@ -1913,6 +2213,16 @@ detach_vmas_to_be_unmapped(struct mm_str
65231 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
65232 vma->vm_prev = NULL;
65233 do {
65234+
65235+#ifdef CONFIG_PAX_SEGMEXEC
65236+ if (vma->vm_mirror) {
65237+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
65238+ vma->vm_mirror->vm_mirror = NULL;
65239+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
65240+ vma->vm_mirror = NULL;
65241+ }
65242+#endif
65243+
65244 rb_erase(&vma->vm_rb, &mm->mm_rb);
65245 mm->map_count--;
65246 tail_vma = vma;
65247@@ -1941,14 +2251,33 @@ static int __split_vma(struct mm_struct
65248 struct vm_area_struct *new;
65249 int err = -ENOMEM;
65250
65251+#ifdef CONFIG_PAX_SEGMEXEC
65252+ struct vm_area_struct *vma_m, *new_m = NULL;
65253+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
65254+#endif
65255+
65256 if (is_vm_hugetlb_page(vma) && (addr &
65257 ~(huge_page_mask(hstate_vma(vma)))))
65258 return -EINVAL;
65259
65260+#ifdef CONFIG_PAX_SEGMEXEC
65261+ vma_m = pax_find_mirror_vma(vma);
65262+#endif
65263+
65264 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65265 if (!new)
65266 goto out_err;
65267
65268+#ifdef CONFIG_PAX_SEGMEXEC
65269+ if (vma_m) {
65270+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65271+ if (!new_m) {
65272+ kmem_cache_free(vm_area_cachep, new);
65273+ goto out_err;
65274+ }
65275+ }
65276+#endif
65277+
65278 /* most fields are the same, copy all, and then fixup */
65279 *new = *vma;
65280
65281@@ -1961,6 +2290,22 @@ static int __split_vma(struct mm_struct
65282 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
65283 }
65284
65285+#ifdef CONFIG_PAX_SEGMEXEC
65286+ if (vma_m) {
65287+ *new_m = *vma_m;
65288+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
65289+ new_m->vm_mirror = new;
65290+ new->vm_mirror = new_m;
65291+
65292+ if (new_below)
65293+ new_m->vm_end = addr_m;
65294+ else {
65295+ new_m->vm_start = addr_m;
65296+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
65297+ }
65298+ }
65299+#endif
65300+
65301 pol = mpol_dup(vma_policy(vma));
65302 if (IS_ERR(pol)) {
65303 err = PTR_ERR(pol);
65304@@ -1986,6 +2331,42 @@ static int __split_vma(struct mm_struct
65305 else
65306 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
65307
65308+#ifdef CONFIG_PAX_SEGMEXEC
65309+ if (!err && vma_m) {
65310+ if (anon_vma_clone(new_m, vma_m))
65311+ goto out_free_mpol;
65312+
65313+ mpol_get(pol);
65314+ vma_set_policy(new_m, pol);
65315+
65316+ if (new_m->vm_file) {
65317+ get_file(new_m->vm_file);
65318+ if (vma_m->vm_flags & VM_EXECUTABLE)
65319+ added_exe_file_vma(mm);
65320+ }
65321+
65322+ if (new_m->vm_ops && new_m->vm_ops->open)
65323+ new_m->vm_ops->open(new_m);
65324+
65325+ if (new_below)
65326+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
65327+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
65328+ else
65329+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
65330+
65331+ if (err) {
65332+ if (new_m->vm_ops && new_m->vm_ops->close)
65333+ new_m->vm_ops->close(new_m);
65334+ if (new_m->vm_file) {
65335+ if (vma_m->vm_flags & VM_EXECUTABLE)
65336+ removed_exe_file_vma(mm);
65337+ fput(new_m->vm_file);
65338+ }
65339+ mpol_put(pol);
65340+ }
65341+ }
65342+#endif
65343+
65344 /* Success. */
65345 if (!err)
65346 return 0;
65347@@ -1998,10 +2379,18 @@ static int __split_vma(struct mm_struct
65348 removed_exe_file_vma(mm);
65349 fput(new->vm_file);
65350 }
65351- unlink_anon_vmas(new);
65352 out_free_mpol:
65353 mpol_put(pol);
65354 out_free_vma:
65355+
65356+#ifdef CONFIG_PAX_SEGMEXEC
65357+ if (new_m) {
65358+ unlink_anon_vmas(new_m);
65359+ kmem_cache_free(vm_area_cachep, new_m);
65360+ }
65361+#endif
65362+
65363+ unlink_anon_vmas(new);
65364 kmem_cache_free(vm_area_cachep, new);
65365 out_err:
65366 return err;
65367@@ -2014,6 +2403,15 @@ static int __split_vma(struct mm_struct
65368 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
65369 unsigned long addr, int new_below)
65370 {
65371+
65372+#ifdef CONFIG_PAX_SEGMEXEC
65373+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
65374+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
65375+ if (mm->map_count >= sysctl_max_map_count-1)
65376+ return -ENOMEM;
65377+ } else
65378+#endif
65379+
65380 if (mm->map_count >= sysctl_max_map_count)
65381 return -ENOMEM;
65382
65383@@ -2025,11 +2423,30 @@ int split_vma(struct mm_struct *mm, stru
65384 * work. This now handles partial unmappings.
65385 * Jeremy Fitzhardinge <jeremy@goop.org>
65386 */
65387+#ifdef CONFIG_PAX_SEGMEXEC
65388 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65389 {
65390+ int ret = __do_munmap(mm, start, len);
65391+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
65392+ return ret;
65393+
65394+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
65395+}
65396+
65397+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65398+#else
65399+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65400+#endif
65401+{
65402 unsigned long end;
65403 struct vm_area_struct *vma, *prev, *last;
65404
65405+ /*
65406+ * mm->mmap_sem is required to protect against another thread
65407+ * changing the mappings in case we sleep.
65408+ */
65409+ verify_mm_writelocked(mm);
65410+
65411 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
65412 return -EINVAL;
65413
65414@@ -2104,6 +2521,8 @@ int do_munmap(struct mm_struct *mm, unsi
65415 /* Fix up all other VM information */
65416 remove_vma_list(mm, vma);
65417
65418+ track_exec_limit(mm, start, end, 0UL);
65419+
65420 return 0;
65421 }
65422
65423@@ -2116,22 +2535,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
65424
65425 profile_munmap(addr);
65426
65427+#ifdef CONFIG_PAX_SEGMEXEC
65428+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
65429+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
65430+ return -EINVAL;
65431+#endif
65432+
65433 down_write(&mm->mmap_sem);
65434 ret = do_munmap(mm, addr, len);
65435 up_write(&mm->mmap_sem);
65436 return ret;
65437 }
65438
65439-static inline void verify_mm_writelocked(struct mm_struct *mm)
65440-{
65441-#ifdef CONFIG_DEBUG_VM
65442- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
65443- WARN_ON(1);
65444- up_read(&mm->mmap_sem);
65445- }
65446-#endif
65447-}
65448-
65449 /*
65450 * this is really a simplified "do_mmap". it only handles
65451 * anonymous maps. eventually we may be able to do some
65452@@ -2145,6 +2560,7 @@ unsigned long do_brk(unsigned long addr,
65453 struct rb_node ** rb_link, * rb_parent;
65454 pgoff_t pgoff = addr >> PAGE_SHIFT;
65455 int error;
65456+ unsigned long charged;
65457
65458 len = PAGE_ALIGN(len);
65459 if (!len)
65460@@ -2156,16 +2572,30 @@ unsigned long do_brk(unsigned long addr,
65461
65462 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
65463
65464+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
65465+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65466+ flags &= ~VM_EXEC;
65467+
65468+#ifdef CONFIG_PAX_MPROTECT
65469+ if (mm->pax_flags & MF_PAX_MPROTECT)
65470+ flags &= ~VM_MAYEXEC;
65471+#endif
65472+
65473+ }
65474+#endif
65475+
65476 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
65477 if (error & ~PAGE_MASK)
65478 return error;
65479
65480+ charged = len >> PAGE_SHIFT;
65481+
65482 /*
65483 * mlock MCL_FUTURE?
65484 */
65485 if (mm->def_flags & VM_LOCKED) {
65486 unsigned long locked, lock_limit;
65487- locked = len >> PAGE_SHIFT;
65488+ locked = charged;
65489 locked += mm->locked_vm;
65490 lock_limit = rlimit(RLIMIT_MEMLOCK);
65491 lock_limit >>= PAGE_SHIFT;
65492@@ -2182,22 +2612,22 @@ unsigned long do_brk(unsigned long addr,
65493 /*
65494 * Clear old maps. this also does some error checking for us
65495 */
65496- munmap_back:
65497 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
65498 if (vma && vma->vm_start < addr + len) {
65499 if (do_munmap(mm, addr, len))
65500 return -ENOMEM;
65501- goto munmap_back;
65502+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
65503+ BUG_ON(vma && vma->vm_start < addr + len);
65504 }
65505
65506 /* Check against address space limits *after* clearing old maps... */
65507- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
65508+ if (!may_expand_vm(mm, charged))
65509 return -ENOMEM;
65510
65511 if (mm->map_count > sysctl_max_map_count)
65512 return -ENOMEM;
65513
65514- if (security_vm_enough_memory(len >> PAGE_SHIFT))
65515+ if (security_vm_enough_memory(charged))
65516 return -ENOMEM;
65517
65518 /* Can we just expand an old private anonymous mapping? */
65519@@ -2211,7 +2641,7 @@ unsigned long do_brk(unsigned long addr,
65520 */
65521 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65522 if (!vma) {
65523- vm_unacct_memory(len >> PAGE_SHIFT);
65524+ vm_unacct_memory(charged);
65525 return -ENOMEM;
65526 }
65527
65528@@ -2225,11 +2655,12 @@ unsigned long do_brk(unsigned long addr,
65529 vma_link(mm, vma, prev, rb_link, rb_parent);
65530 out:
65531 perf_event_mmap(vma);
65532- mm->total_vm += len >> PAGE_SHIFT;
65533+ mm->total_vm += charged;
65534 if (flags & VM_LOCKED) {
65535 if (!mlock_vma_pages_range(vma, addr, addr + len))
65536- mm->locked_vm += (len >> PAGE_SHIFT);
65537+ mm->locked_vm += charged;
65538 }
65539+ track_exec_limit(mm, addr, addr + len, flags);
65540 return addr;
65541 }
65542
65543@@ -2276,8 +2707,10 @@ void exit_mmap(struct mm_struct *mm)
65544 * Walk the list again, actually closing and freeing it,
65545 * with preemption enabled, without holding any MM locks.
65546 */
65547- while (vma)
65548+ while (vma) {
65549+ vma->vm_mirror = NULL;
65550 vma = remove_vma(vma);
65551+ }
65552
65553 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
65554 }
65555@@ -2291,6 +2724,13 @@ int insert_vm_struct(struct mm_struct *
65556 struct vm_area_struct * __vma, * prev;
65557 struct rb_node ** rb_link, * rb_parent;
65558
65559+#ifdef CONFIG_PAX_SEGMEXEC
65560+ struct vm_area_struct *vma_m = NULL;
65561+#endif
65562+
65563+ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
65564+ return -EPERM;
65565+
65566 /*
65567 * The vm_pgoff of a purely anonymous vma should be irrelevant
65568 * until its first write fault, when page's anon_vma and index
65569@@ -2313,7 +2753,22 @@ int insert_vm_struct(struct mm_struct *
65570 if ((vma->vm_flags & VM_ACCOUNT) &&
65571 security_vm_enough_memory_mm(mm, vma_pages(vma)))
65572 return -ENOMEM;
65573+
65574+#ifdef CONFIG_PAX_SEGMEXEC
65575+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
65576+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65577+ if (!vma_m)
65578+ return -ENOMEM;
65579+ }
65580+#endif
65581+
65582 vma_link(mm, vma, prev, rb_link, rb_parent);
65583+
65584+#ifdef CONFIG_PAX_SEGMEXEC
65585+ if (vma_m)
65586+ BUG_ON(pax_mirror_vma(vma_m, vma));
65587+#endif
65588+
65589 return 0;
65590 }
65591
65592@@ -2331,6 +2786,8 @@ struct vm_area_struct *copy_vma(struct v
65593 struct rb_node **rb_link, *rb_parent;
65594 struct mempolicy *pol;
65595
65596+ BUG_ON(vma->vm_mirror);
65597+
65598 /*
65599 * If anonymous vma has not yet been faulted, update new pgoff
65600 * to match new location, to increase its chance of merging.
65601@@ -2381,6 +2838,39 @@ struct vm_area_struct *copy_vma(struct v
65602 return NULL;
65603 }
65604
65605+#ifdef CONFIG_PAX_SEGMEXEC
65606+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
65607+{
65608+ struct vm_area_struct *prev_m;
65609+ struct rb_node **rb_link_m, *rb_parent_m;
65610+ struct mempolicy *pol_m;
65611+
65612+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
65613+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
65614+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
65615+ *vma_m = *vma;
65616+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
65617+ if (anon_vma_clone(vma_m, vma))
65618+ return -ENOMEM;
65619+ pol_m = vma_policy(vma_m);
65620+ mpol_get(pol_m);
65621+ vma_set_policy(vma_m, pol_m);
65622+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
65623+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
65624+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
65625+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
65626+ if (vma_m->vm_file)
65627+ get_file(vma_m->vm_file);
65628+ if (vma_m->vm_ops && vma_m->vm_ops->open)
65629+ vma_m->vm_ops->open(vma_m);
65630+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
65631+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
65632+ vma_m->vm_mirror = vma;
65633+ vma->vm_mirror = vma_m;
65634+ return 0;
65635+}
65636+#endif
65637+
65638 /*
65639 * Return true if the calling process may expand its vm space by the passed
65640 * number of pages
65641@@ -2391,7 +2881,7 @@ int may_expand_vm(struct mm_struct *mm,
65642 unsigned long lim;
65643
65644 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
65645-
65646+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
65647 if (cur + npages > lim)
65648 return 0;
65649 return 1;
65650@@ -2462,6 +2952,22 @@ int install_special_mapping(struct mm_st
65651 vma->vm_start = addr;
65652 vma->vm_end = addr + len;
65653
65654+#ifdef CONFIG_PAX_MPROTECT
65655+ if (mm->pax_flags & MF_PAX_MPROTECT) {
65656+#ifndef CONFIG_PAX_MPROTECT_COMPAT
65657+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
65658+ return -EPERM;
65659+ if (!(vm_flags & VM_EXEC))
65660+ vm_flags &= ~VM_MAYEXEC;
65661+#else
65662+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
65663+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
65664+#endif
65665+ else
65666+ vm_flags &= ~VM_MAYWRITE;
65667+ }
65668+#endif
65669+
65670 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
65671 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
65672
65673diff -urNp linux-3.0.4/mm/mprotect.c linux-3.0.4/mm/mprotect.c
65674--- linux-3.0.4/mm/mprotect.c 2011-07-21 22:17:23.000000000 -0400
65675+++ linux-3.0.4/mm/mprotect.c 2011-08-23 21:48:14.000000000 -0400
65676@@ -23,10 +23,16 @@
65677 #include <linux/mmu_notifier.h>
65678 #include <linux/migrate.h>
65679 #include <linux/perf_event.h>
65680+
65681+#ifdef CONFIG_PAX_MPROTECT
65682+#include <linux/elf.h>
65683+#endif
65684+
65685 #include <asm/uaccess.h>
65686 #include <asm/pgtable.h>
65687 #include <asm/cacheflush.h>
65688 #include <asm/tlbflush.h>
65689+#include <asm/mmu_context.h>
65690
65691 #ifndef pgprot_modify
65692 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
65693@@ -141,6 +147,48 @@ static void change_protection(struct vm_
65694 flush_tlb_range(vma, start, end);
65695 }
65696
65697+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
65698+/* called while holding the mmap semaphor for writing except stack expansion */
65699+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
65700+{
65701+ unsigned long oldlimit, newlimit = 0UL;
65702+
65703+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
65704+ return;
65705+
65706+ spin_lock(&mm->page_table_lock);
65707+ oldlimit = mm->context.user_cs_limit;
65708+ if ((prot & VM_EXEC) && oldlimit < end)
65709+ /* USER_CS limit moved up */
65710+ newlimit = end;
65711+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
65712+ /* USER_CS limit moved down */
65713+ newlimit = start;
65714+
65715+ if (newlimit) {
65716+ mm->context.user_cs_limit = newlimit;
65717+
65718+#ifdef CONFIG_SMP
65719+ wmb();
65720+ cpus_clear(mm->context.cpu_user_cs_mask);
65721+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
65722+#endif
65723+
65724+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
65725+ }
65726+ spin_unlock(&mm->page_table_lock);
65727+ if (newlimit == end) {
65728+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
65729+
65730+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
65731+ if (is_vm_hugetlb_page(vma))
65732+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
65733+ else
65734+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
65735+ }
65736+}
65737+#endif
65738+
65739 int
65740 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
65741 unsigned long start, unsigned long end, unsigned long newflags)
65742@@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vm
65743 int error;
65744 int dirty_accountable = 0;
65745
65746+#ifdef CONFIG_PAX_SEGMEXEC
65747+ struct vm_area_struct *vma_m = NULL;
65748+ unsigned long start_m, end_m;
65749+
65750+ start_m = start + SEGMEXEC_TASK_SIZE;
65751+ end_m = end + SEGMEXEC_TASK_SIZE;
65752+#endif
65753+
65754 if (newflags == oldflags) {
65755 *pprev = vma;
65756 return 0;
65757 }
65758
65759+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
65760+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
65761+
65762+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
65763+ return -ENOMEM;
65764+
65765+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
65766+ return -ENOMEM;
65767+ }
65768+
65769 /*
65770 * If we make a private mapping writable we increase our commit;
65771 * but (without finer accounting) cannot reduce our commit if we
65772@@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vm
65773 }
65774 }
65775
65776+#ifdef CONFIG_PAX_SEGMEXEC
65777+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
65778+ if (start != vma->vm_start) {
65779+ error = split_vma(mm, vma, start, 1);
65780+ if (error)
65781+ goto fail;
65782+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
65783+ *pprev = (*pprev)->vm_next;
65784+ }
65785+
65786+ if (end != vma->vm_end) {
65787+ error = split_vma(mm, vma, end, 0);
65788+ if (error)
65789+ goto fail;
65790+ }
65791+
65792+ if (pax_find_mirror_vma(vma)) {
65793+ error = __do_munmap(mm, start_m, end_m - start_m);
65794+ if (error)
65795+ goto fail;
65796+ } else {
65797+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65798+ if (!vma_m) {
65799+ error = -ENOMEM;
65800+ goto fail;
65801+ }
65802+ vma->vm_flags = newflags;
65803+ error = pax_mirror_vma(vma_m, vma);
65804+ if (error) {
65805+ vma->vm_flags = oldflags;
65806+ goto fail;
65807+ }
65808+ }
65809+ }
65810+#endif
65811+
65812 /*
65813 * First try to merge with previous and/or next vma.
65814 */
65815@@ -204,9 +306,21 @@ success:
65816 * vm_flags and vm_page_prot are protected by the mmap_sem
65817 * held in write mode.
65818 */
65819+
65820+#ifdef CONFIG_PAX_SEGMEXEC
65821+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
65822+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
65823+#endif
65824+
65825 vma->vm_flags = newflags;
65826+
65827+#ifdef CONFIG_PAX_MPROTECT
65828+ if (mm->binfmt && mm->binfmt->handle_mprotect)
65829+ mm->binfmt->handle_mprotect(vma, newflags);
65830+#endif
65831+
65832 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
65833- vm_get_page_prot(newflags));
65834+ vm_get_page_prot(vma->vm_flags));
65835
65836 if (vma_wants_writenotify(vma)) {
65837 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
65838@@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65839 end = start + len;
65840 if (end <= start)
65841 return -ENOMEM;
65842+
65843+#ifdef CONFIG_PAX_SEGMEXEC
65844+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
65845+ if (end > SEGMEXEC_TASK_SIZE)
65846+ return -EINVAL;
65847+ } else
65848+#endif
65849+
65850+ if (end > TASK_SIZE)
65851+ return -EINVAL;
65852+
65853 if (!arch_validate_prot(prot))
65854 return -EINVAL;
65855
65856@@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65857 /*
65858 * Does the application expect PROT_READ to imply PROT_EXEC:
65859 */
65860- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
65861+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
65862 prot |= PROT_EXEC;
65863
65864 vm_flags = calc_vm_prot_bits(prot);
65865@@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65866 if (start > vma->vm_start)
65867 prev = vma;
65868
65869+#ifdef CONFIG_PAX_MPROTECT
65870+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
65871+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
65872+#endif
65873+
65874 for (nstart = start ; ; ) {
65875 unsigned long newflags;
65876
65877@@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65878
65879 /* newflags >> 4 shift VM_MAY% in place of VM_% */
65880 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
65881+ if (prot & (PROT_WRITE | PROT_EXEC))
65882+ gr_log_rwxmprotect(vma->vm_file);
65883+
65884+ error = -EACCES;
65885+ goto out;
65886+ }
65887+
65888+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
65889 error = -EACCES;
65890 goto out;
65891 }
65892@@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65893 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
65894 if (error)
65895 goto out;
65896+
65897+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
65898+
65899 nstart = tmp;
65900
65901 if (nstart < prev->vm_end)
65902diff -urNp linux-3.0.4/mm/mremap.c linux-3.0.4/mm/mremap.c
65903--- linux-3.0.4/mm/mremap.c 2011-07-21 22:17:23.000000000 -0400
65904+++ linux-3.0.4/mm/mremap.c 2011-08-23 21:47:56.000000000 -0400
65905@@ -113,6 +113,12 @@ static void move_ptes(struct vm_area_str
65906 continue;
65907 pte = ptep_clear_flush(vma, old_addr, old_pte);
65908 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
65909+
65910+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
65911+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
65912+ pte = pte_exprotect(pte);
65913+#endif
65914+
65915 set_pte_at(mm, new_addr, new_pte, pte);
65916 }
65917
65918@@ -272,6 +278,11 @@ static struct vm_area_struct *vma_to_res
65919 if (is_vm_hugetlb_page(vma))
65920 goto Einval;
65921
65922+#ifdef CONFIG_PAX_SEGMEXEC
65923+ if (pax_find_mirror_vma(vma))
65924+ goto Einval;
65925+#endif
65926+
65927 /* We can't remap across vm area boundaries */
65928 if (old_len > vma->vm_end - addr)
65929 goto Efault;
65930@@ -328,20 +339,25 @@ static unsigned long mremap_to(unsigned
65931 unsigned long ret = -EINVAL;
65932 unsigned long charged = 0;
65933 unsigned long map_flags;
65934+ unsigned long pax_task_size = TASK_SIZE;
65935
65936 if (new_addr & ~PAGE_MASK)
65937 goto out;
65938
65939- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
65940+#ifdef CONFIG_PAX_SEGMEXEC
65941+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
65942+ pax_task_size = SEGMEXEC_TASK_SIZE;
65943+#endif
65944+
65945+ pax_task_size -= PAGE_SIZE;
65946+
65947+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
65948 goto out;
65949
65950 /* Check if the location we're moving into overlaps the
65951 * old location at all, and fail if it does.
65952 */
65953- if ((new_addr <= addr) && (new_addr+new_len) > addr)
65954- goto out;
65955-
65956- if ((addr <= new_addr) && (addr+old_len) > new_addr)
65957+ if (addr + old_len > new_addr && new_addr + new_len > addr)
65958 goto out;
65959
65960 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
65961@@ -413,6 +429,7 @@ unsigned long do_mremap(unsigned long ad
65962 struct vm_area_struct *vma;
65963 unsigned long ret = -EINVAL;
65964 unsigned long charged = 0;
65965+ unsigned long pax_task_size = TASK_SIZE;
65966
65967 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
65968 goto out;
65969@@ -431,6 +448,17 @@ unsigned long do_mremap(unsigned long ad
65970 if (!new_len)
65971 goto out;
65972
65973+#ifdef CONFIG_PAX_SEGMEXEC
65974+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
65975+ pax_task_size = SEGMEXEC_TASK_SIZE;
65976+#endif
65977+
65978+ pax_task_size -= PAGE_SIZE;
65979+
65980+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
65981+ old_len > pax_task_size || addr > pax_task_size-old_len)
65982+ goto out;
65983+
65984 if (flags & MREMAP_FIXED) {
65985 if (flags & MREMAP_MAYMOVE)
65986 ret = mremap_to(addr, old_len, new_addr, new_len);
65987@@ -480,6 +508,7 @@ unsigned long do_mremap(unsigned long ad
65988 addr + new_len);
65989 }
65990 ret = addr;
65991+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
65992 goto out;
65993 }
65994 }
65995@@ -506,7 +535,13 @@ unsigned long do_mremap(unsigned long ad
65996 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
65997 if (ret)
65998 goto out;
65999+
66000+ map_flags = vma->vm_flags;
66001 ret = move_vma(vma, addr, old_len, new_len, new_addr);
66002+ if (!(ret & ~PAGE_MASK)) {
66003+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
66004+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
66005+ }
66006 }
66007 out:
66008 if (ret & ~PAGE_MASK)
66009diff -urNp linux-3.0.4/mm/nobootmem.c linux-3.0.4/mm/nobootmem.c
66010--- linux-3.0.4/mm/nobootmem.c 2011-07-21 22:17:23.000000000 -0400
66011+++ linux-3.0.4/mm/nobootmem.c 2011-08-23 21:47:56.000000000 -0400
66012@@ -110,19 +110,30 @@ static void __init __free_pages_memory(u
66013 unsigned long __init free_all_memory_core_early(int nodeid)
66014 {
66015 int i;
66016- u64 start, end;
66017+ u64 start, end, startrange, endrange;
66018 unsigned long count = 0;
66019- struct range *range = NULL;
66020+ struct range *range = NULL, rangerange = { 0, 0 };
66021 int nr_range;
66022
66023 nr_range = get_free_all_memory_range(&range, nodeid);
66024+ startrange = __pa(range) >> PAGE_SHIFT;
66025+ endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
66026
66027 for (i = 0; i < nr_range; i++) {
66028 start = range[i].start;
66029 end = range[i].end;
66030+ if (start <= endrange && startrange < end) {
66031+ BUG_ON(rangerange.start | rangerange.end);
66032+ rangerange = range[i];
66033+ continue;
66034+ }
66035 count += end - start;
66036 __free_pages_memory(start, end);
66037 }
66038+ start = rangerange.start;
66039+ end = rangerange.end;
66040+ count += end - start;
66041+ __free_pages_memory(start, end);
66042
66043 return count;
66044 }
66045diff -urNp linux-3.0.4/mm/nommu.c linux-3.0.4/mm/nommu.c
66046--- linux-3.0.4/mm/nommu.c 2011-07-21 22:17:23.000000000 -0400
66047+++ linux-3.0.4/mm/nommu.c 2011-08-23 21:47:56.000000000 -0400
66048@@ -63,7 +63,6 @@ int sysctl_overcommit_memory = OVERCOMMI
66049 int sysctl_overcommit_ratio = 50; /* default is 50% */
66050 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
66051 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
66052-int heap_stack_gap = 0;
66053
66054 atomic_long_t mmap_pages_allocated;
66055
66056@@ -826,15 +825,6 @@ struct vm_area_struct *find_vma(struct m
66057 EXPORT_SYMBOL(find_vma);
66058
66059 /*
66060- * find a VMA
66061- * - we don't extend stack VMAs under NOMMU conditions
66062- */
66063-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
66064-{
66065- return find_vma(mm, addr);
66066-}
66067-
66068-/*
66069 * expand a stack to a given address
66070 * - not supported under NOMMU conditions
66071 */
66072@@ -1554,6 +1544,7 @@ int split_vma(struct mm_struct *mm, stru
66073
66074 /* most fields are the same, copy all, and then fixup */
66075 *new = *vma;
66076+ INIT_LIST_HEAD(&new->anon_vma_chain);
66077 *region = *vma->vm_region;
66078 new->vm_region = region;
66079
66080diff -urNp linux-3.0.4/mm/page_alloc.c linux-3.0.4/mm/page_alloc.c
66081--- linux-3.0.4/mm/page_alloc.c 2011-07-21 22:17:23.000000000 -0400
66082+++ linux-3.0.4/mm/page_alloc.c 2011-08-23 21:48:14.000000000 -0400
66083@@ -340,7 +340,7 @@ out:
66084 * This usage means that zero-order pages may not be compound.
66085 */
66086
66087-static void free_compound_page(struct page *page)
66088+void free_compound_page(struct page *page)
66089 {
66090 __free_pages_ok(page, compound_order(page));
66091 }
66092@@ -653,6 +653,10 @@ static bool free_pages_prepare(struct pa
66093 int i;
66094 int bad = 0;
66095
66096+#ifdef CONFIG_PAX_MEMORY_SANITIZE
66097+ unsigned long index = 1UL << order;
66098+#endif
66099+
66100 trace_mm_page_free_direct(page, order);
66101 kmemcheck_free_shadow(page, order);
66102
66103@@ -668,6 +672,12 @@ static bool free_pages_prepare(struct pa
66104 debug_check_no_obj_freed(page_address(page),
66105 PAGE_SIZE << order);
66106 }
66107+
66108+#ifdef CONFIG_PAX_MEMORY_SANITIZE
66109+ for (; index; --index)
66110+ sanitize_highpage(page + index - 1);
66111+#endif
66112+
66113 arch_free_page(page, order);
66114 kernel_map_pages(page, 1 << order, 0);
66115
66116@@ -783,8 +793,10 @@ static int prep_new_page(struct page *pa
66117 arch_alloc_page(page, order);
66118 kernel_map_pages(page, 1 << order, 1);
66119
66120+#ifndef CONFIG_PAX_MEMORY_SANITIZE
66121 if (gfp_flags & __GFP_ZERO)
66122 prep_zero_page(page, order, gfp_flags);
66123+#endif
66124
66125 if (order && (gfp_flags & __GFP_COMP))
66126 prep_compound_page(page, order);
66127@@ -2525,6 +2537,8 @@ void show_free_areas(unsigned int filter
66128 int cpu;
66129 struct zone *zone;
66130
66131+ pax_track_stack();
66132+
66133 for_each_populated_zone(zone) {
66134 if (skip_free_areas_node(filter, zone_to_nid(zone)))
66135 continue;
66136diff -urNp linux-3.0.4/mm/percpu.c linux-3.0.4/mm/percpu.c
66137--- linux-3.0.4/mm/percpu.c 2011-07-21 22:17:23.000000000 -0400
66138+++ linux-3.0.4/mm/percpu.c 2011-08-23 21:47:56.000000000 -0400
66139@@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu
66140 static unsigned int pcpu_last_unit_cpu __read_mostly;
66141
66142 /* the address of the first chunk which starts with the kernel static area */
66143-void *pcpu_base_addr __read_mostly;
66144+void *pcpu_base_addr __read_only;
66145 EXPORT_SYMBOL_GPL(pcpu_base_addr);
66146
66147 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
66148diff -urNp linux-3.0.4/mm/rmap.c linux-3.0.4/mm/rmap.c
66149--- linux-3.0.4/mm/rmap.c 2011-07-21 22:17:23.000000000 -0400
66150+++ linux-3.0.4/mm/rmap.c 2011-08-23 21:47:56.000000000 -0400
66151@@ -153,6 +153,10 @@ int anon_vma_prepare(struct vm_area_stru
66152 struct anon_vma *anon_vma = vma->anon_vma;
66153 struct anon_vma_chain *avc;
66154
66155+#ifdef CONFIG_PAX_SEGMEXEC
66156+ struct anon_vma_chain *avc_m = NULL;
66157+#endif
66158+
66159 might_sleep();
66160 if (unlikely(!anon_vma)) {
66161 struct mm_struct *mm = vma->vm_mm;
66162@@ -162,6 +166,12 @@ int anon_vma_prepare(struct vm_area_stru
66163 if (!avc)
66164 goto out_enomem;
66165
66166+#ifdef CONFIG_PAX_SEGMEXEC
66167+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
66168+ if (!avc_m)
66169+ goto out_enomem_free_avc;
66170+#endif
66171+
66172 anon_vma = find_mergeable_anon_vma(vma);
66173 allocated = NULL;
66174 if (!anon_vma) {
66175@@ -175,6 +185,21 @@ int anon_vma_prepare(struct vm_area_stru
66176 /* page_table_lock to protect against threads */
66177 spin_lock(&mm->page_table_lock);
66178 if (likely(!vma->anon_vma)) {
66179+
66180+#ifdef CONFIG_PAX_SEGMEXEC
66181+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
66182+
66183+ if (vma_m) {
66184+ BUG_ON(vma_m->anon_vma);
66185+ vma_m->anon_vma = anon_vma;
66186+ avc_m->anon_vma = anon_vma;
66187+ avc_m->vma = vma;
66188+ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
66189+ list_add(&avc_m->same_anon_vma, &anon_vma->head);
66190+ avc_m = NULL;
66191+ }
66192+#endif
66193+
66194 vma->anon_vma = anon_vma;
66195 avc->anon_vma = anon_vma;
66196 avc->vma = vma;
66197@@ -188,12 +213,24 @@ int anon_vma_prepare(struct vm_area_stru
66198
66199 if (unlikely(allocated))
66200 put_anon_vma(allocated);
66201+
66202+#ifdef CONFIG_PAX_SEGMEXEC
66203+ if (unlikely(avc_m))
66204+ anon_vma_chain_free(avc_m);
66205+#endif
66206+
66207 if (unlikely(avc))
66208 anon_vma_chain_free(avc);
66209 }
66210 return 0;
66211
66212 out_enomem_free_avc:
66213+
66214+#ifdef CONFIG_PAX_SEGMEXEC
66215+ if (avc_m)
66216+ anon_vma_chain_free(avc_m);
66217+#endif
66218+
66219 anon_vma_chain_free(avc);
66220 out_enomem:
66221 return -ENOMEM;
66222@@ -244,7 +281,7 @@ static void anon_vma_chain_link(struct v
66223 * Attach the anon_vmas from src to dst.
66224 * Returns 0 on success, -ENOMEM on failure.
66225 */
66226-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
66227+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
66228 {
66229 struct anon_vma_chain *avc, *pavc;
66230 struct anon_vma *root = NULL;
66231@@ -277,7 +314,7 @@ int anon_vma_clone(struct vm_area_struct
66232 * the corresponding VMA in the parent process is attached to.
66233 * Returns 0 on success, non-zero on failure.
66234 */
66235-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
66236+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
66237 {
66238 struct anon_vma_chain *avc;
66239 struct anon_vma *anon_vma;
66240diff -urNp linux-3.0.4/mm/shmem.c linux-3.0.4/mm/shmem.c
66241--- linux-3.0.4/mm/shmem.c 2011-07-21 22:17:23.000000000 -0400
66242+++ linux-3.0.4/mm/shmem.c 2011-08-23 21:48:14.000000000 -0400
66243@@ -31,7 +31,7 @@
66244 #include <linux/percpu_counter.h>
66245 #include <linux/swap.h>
66246
66247-static struct vfsmount *shm_mnt;
66248+struct vfsmount *shm_mnt;
66249
66250 #ifdef CONFIG_SHMEM
66251 /*
66252@@ -1101,6 +1101,8 @@ static int shmem_writepage(struct page *
66253 goto unlock;
66254 }
66255 entry = shmem_swp_entry(info, index, NULL);
66256+ if (!entry)
66257+ goto unlock;
66258 if (entry->val) {
66259 /*
66260 * The more uptodate page coming down from a stacked
66261@@ -1172,6 +1174,8 @@ static struct page *shmem_swapin(swp_ent
66262 struct vm_area_struct pvma;
66263 struct page *page;
66264
66265+ pax_track_stack();
66266+
66267 spol = mpol_cond_copy(&mpol,
66268 mpol_shared_policy_lookup(&info->policy, idx));
66269
66270@@ -2568,8 +2572,7 @@ int shmem_fill_super(struct super_block
66271 int err = -ENOMEM;
66272
66273 /* Round up to L1_CACHE_BYTES to resist false sharing */
66274- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
66275- L1_CACHE_BYTES), GFP_KERNEL);
66276+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
66277 if (!sbinfo)
66278 return -ENOMEM;
66279
66280diff -urNp linux-3.0.4/mm/slab.c linux-3.0.4/mm/slab.c
66281--- linux-3.0.4/mm/slab.c 2011-07-21 22:17:23.000000000 -0400
66282+++ linux-3.0.4/mm/slab.c 2011-08-23 21:48:14.000000000 -0400
66283@@ -151,7 +151,7 @@
66284
66285 /* Legal flag mask for kmem_cache_create(). */
66286 #if DEBUG
66287-# define CREATE_MASK (SLAB_RED_ZONE | \
66288+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
66289 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
66290 SLAB_CACHE_DMA | \
66291 SLAB_STORE_USER | \
66292@@ -159,7 +159,7 @@
66293 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
66294 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
66295 #else
66296-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
66297+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
66298 SLAB_CACHE_DMA | \
66299 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
66300 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
66301@@ -288,7 +288,7 @@ struct kmem_list3 {
66302 * Need this for bootstrapping a per node allocator.
66303 */
66304 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
66305-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
66306+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
66307 #define CACHE_CACHE 0
66308 #define SIZE_AC MAX_NUMNODES
66309 #define SIZE_L3 (2 * MAX_NUMNODES)
66310@@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_
66311 if ((x)->max_freeable < i) \
66312 (x)->max_freeable = i; \
66313 } while (0)
66314-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
66315-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
66316-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
66317-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
66318+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
66319+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
66320+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
66321+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
66322 #else
66323 #define STATS_INC_ACTIVE(x) do { } while (0)
66324 #define STATS_DEC_ACTIVE(x) do { } while (0)
66325@@ -538,7 +538,7 @@ static inline void *index_to_obj(struct
66326 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
66327 */
66328 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
66329- const struct slab *slab, void *obj)
66330+ const struct slab *slab, const void *obj)
66331 {
66332 u32 offset = (obj - slab->s_mem);
66333 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
66334@@ -564,7 +564,7 @@ struct cache_names {
66335 static struct cache_names __initdata cache_names[] = {
66336 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
66337 #include <linux/kmalloc_sizes.h>
66338- {NULL,}
66339+ {NULL}
66340 #undef CACHE
66341 };
66342
66343@@ -1530,7 +1530,7 @@ void __init kmem_cache_init(void)
66344 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
66345 sizes[INDEX_AC].cs_size,
66346 ARCH_KMALLOC_MINALIGN,
66347- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
66348+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
66349 NULL);
66350
66351 if (INDEX_AC != INDEX_L3) {
66352@@ -1538,7 +1538,7 @@ void __init kmem_cache_init(void)
66353 kmem_cache_create(names[INDEX_L3].name,
66354 sizes[INDEX_L3].cs_size,
66355 ARCH_KMALLOC_MINALIGN,
66356- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
66357+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
66358 NULL);
66359 }
66360
66361@@ -1556,7 +1556,7 @@ void __init kmem_cache_init(void)
66362 sizes->cs_cachep = kmem_cache_create(names->name,
66363 sizes->cs_size,
66364 ARCH_KMALLOC_MINALIGN,
66365- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
66366+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
66367 NULL);
66368 }
66369 #ifdef CONFIG_ZONE_DMA
66370@@ -4272,10 +4272,10 @@ static int s_show(struct seq_file *m, vo
66371 }
66372 /* cpu stats */
66373 {
66374- unsigned long allochit = atomic_read(&cachep->allochit);
66375- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
66376- unsigned long freehit = atomic_read(&cachep->freehit);
66377- unsigned long freemiss = atomic_read(&cachep->freemiss);
66378+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
66379+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
66380+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
66381+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
66382
66383 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
66384 allochit, allocmiss, freehit, freemiss);
66385@@ -4532,15 +4532,66 @@ static const struct file_operations proc
66386
66387 static int __init slab_proc_init(void)
66388 {
66389- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
66390+ mode_t gr_mode = S_IRUGO;
66391+
66392+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66393+ gr_mode = S_IRUSR;
66394+#endif
66395+
66396+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
66397 #ifdef CONFIG_DEBUG_SLAB_LEAK
66398- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
66399+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
66400 #endif
66401 return 0;
66402 }
66403 module_init(slab_proc_init);
66404 #endif
66405
66406+void check_object_size(const void *ptr, unsigned long n, bool to)
66407+{
66408+
66409+#ifdef CONFIG_PAX_USERCOPY
66410+ struct page *page;
66411+ struct kmem_cache *cachep = NULL;
66412+ struct slab *slabp;
66413+ unsigned int objnr;
66414+ unsigned long offset;
66415+
66416+ if (!n)
66417+ return;
66418+
66419+ if (ZERO_OR_NULL_PTR(ptr))
66420+ goto report;
66421+
66422+ if (!virt_addr_valid(ptr))
66423+ return;
66424+
66425+ page = virt_to_head_page(ptr);
66426+
66427+ if (!PageSlab(page)) {
66428+ if (object_is_on_stack(ptr, n) == -1)
66429+ goto report;
66430+ return;
66431+ }
66432+
66433+ cachep = page_get_cache(page);
66434+ if (!(cachep->flags & SLAB_USERCOPY))
66435+ goto report;
66436+
66437+ slabp = page_get_slab(page);
66438+ objnr = obj_to_index(cachep, slabp, ptr);
66439+ BUG_ON(objnr >= cachep->num);
66440+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
66441+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
66442+ return;
66443+
66444+report:
66445+ pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
66446+#endif
66447+
66448+}
66449+EXPORT_SYMBOL(check_object_size);
66450+
66451 /**
66452 * ksize - get the actual amount of memory allocated for a given object
66453 * @objp: Pointer to the object
66454diff -urNp linux-3.0.4/mm/slob.c linux-3.0.4/mm/slob.c
66455--- linux-3.0.4/mm/slob.c 2011-07-21 22:17:23.000000000 -0400
66456+++ linux-3.0.4/mm/slob.c 2011-08-23 21:47:56.000000000 -0400
66457@@ -29,7 +29,7 @@
66458 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
66459 * alloc_pages() directly, allocating compound pages so the page order
66460 * does not have to be separately tracked, and also stores the exact
66461- * allocation size in page->private so that it can be used to accurately
66462+ * allocation size in slob_page->size so that it can be used to accurately
66463 * provide ksize(). These objects are detected in kfree() because slob_page()
66464 * is false for them.
66465 *
66466@@ -58,6 +58,7 @@
66467 */
66468
66469 #include <linux/kernel.h>
66470+#include <linux/sched.h>
66471 #include <linux/slab.h>
66472 #include <linux/mm.h>
66473 #include <linux/swap.h> /* struct reclaim_state */
66474@@ -102,7 +103,8 @@ struct slob_page {
66475 unsigned long flags; /* mandatory */
66476 atomic_t _count; /* mandatory */
66477 slobidx_t units; /* free units left in page */
66478- unsigned long pad[2];
66479+ unsigned long pad[1];
66480+ unsigned long size; /* size when >=PAGE_SIZE */
66481 slob_t *free; /* first free slob_t in page */
66482 struct list_head list; /* linked list of free pages */
66483 };
66484@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
66485 */
66486 static inline int is_slob_page(struct slob_page *sp)
66487 {
66488- return PageSlab((struct page *)sp);
66489+ return PageSlab((struct page *)sp) && !sp->size;
66490 }
66491
66492 static inline void set_slob_page(struct slob_page *sp)
66493@@ -150,7 +152,7 @@ static inline void clear_slob_page(struc
66494
66495 static inline struct slob_page *slob_page(const void *addr)
66496 {
66497- return (struct slob_page *)virt_to_page(addr);
66498+ return (struct slob_page *)virt_to_head_page(addr);
66499 }
66500
66501 /*
66502@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_
66503 /*
66504 * Return the size of a slob block.
66505 */
66506-static slobidx_t slob_units(slob_t *s)
66507+static slobidx_t slob_units(const slob_t *s)
66508 {
66509 if (s->units > 0)
66510 return s->units;
66511@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
66512 /*
66513 * Return the next free slob block pointer after this one.
66514 */
66515-static slob_t *slob_next(slob_t *s)
66516+static slob_t *slob_next(const slob_t *s)
66517 {
66518 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
66519 slobidx_t next;
66520@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
66521 /*
66522 * Returns true if s is the last free block in its page.
66523 */
66524-static int slob_last(slob_t *s)
66525+static int slob_last(const slob_t *s)
66526 {
66527 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
66528 }
66529@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, i
66530 if (!page)
66531 return NULL;
66532
66533+ set_slob_page(page);
66534 return page_address(page);
66535 }
66536
66537@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp
66538 if (!b)
66539 return NULL;
66540 sp = slob_page(b);
66541- set_slob_page(sp);
66542
66543 spin_lock_irqsave(&slob_lock, flags);
66544 sp->units = SLOB_UNITS(PAGE_SIZE);
66545 sp->free = b;
66546+ sp->size = 0;
66547 INIT_LIST_HEAD(&sp->list);
66548 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
66549 set_slob_page_free(sp, slob_list);
66550@@ -476,10 +479,9 @@ out:
66551 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
66552 */
66553
66554-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
66555+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
66556 {
66557- unsigned int *m;
66558- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66559+ slob_t *m;
66560 void *ret;
66561
66562 lockdep_trace_alloc(gfp);
66563@@ -492,7 +494,10 @@ void *__kmalloc_node(size_t size, gfp_t
66564
66565 if (!m)
66566 return NULL;
66567- *m = size;
66568+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
66569+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
66570+ m[0].units = size;
66571+ m[1].units = align;
66572 ret = (void *)m + align;
66573
66574 trace_kmalloc_node(_RET_IP_, ret,
66575@@ -504,16 +509,25 @@ void *__kmalloc_node(size_t size, gfp_t
66576 gfp |= __GFP_COMP;
66577 ret = slob_new_pages(gfp, order, node);
66578 if (ret) {
66579- struct page *page;
66580- page = virt_to_page(ret);
66581- page->private = size;
66582+ struct slob_page *sp;
66583+ sp = slob_page(ret);
66584+ sp->size = size;
66585 }
66586
66587 trace_kmalloc_node(_RET_IP_, ret,
66588 size, PAGE_SIZE << order, gfp, node);
66589 }
66590
66591- kmemleak_alloc(ret, size, 1, gfp);
66592+ return ret;
66593+}
66594+
66595+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
66596+{
66597+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66598+ void *ret = __kmalloc_node_align(size, gfp, node, align);
66599+
66600+ if (!ZERO_OR_NULL_PTR(ret))
66601+ kmemleak_alloc(ret, size, 1, gfp);
66602 return ret;
66603 }
66604 EXPORT_SYMBOL(__kmalloc_node);
66605@@ -531,13 +545,88 @@ void kfree(const void *block)
66606 sp = slob_page(block);
66607 if (is_slob_page(sp)) {
66608 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66609- unsigned int *m = (unsigned int *)(block - align);
66610- slob_free(m, *m + align);
66611- } else
66612+ slob_t *m = (slob_t *)(block - align);
66613+ slob_free(m, m[0].units + align);
66614+ } else {
66615+ clear_slob_page(sp);
66616+ free_slob_page(sp);
66617+ sp->size = 0;
66618 put_page(&sp->page);
66619+ }
66620 }
66621 EXPORT_SYMBOL(kfree);
66622
66623+void check_object_size(const void *ptr, unsigned long n, bool to)
66624+{
66625+
66626+#ifdef CONFIG_PAX_USERCOPY
66627+ struct slob_page *sp;
66628+ const slob_t *free;
66629+ const void *base;
66630+ unsigned long flags;
66631+
66632+ if (!n)
66633+ return;
66634+
66635+ if (ZERO_OR_NULL_PTR(ptr))
66636+ goto report;
66637+
66638+ if (!virt_addr_valid(ptr))
66639+ return;
66640+
66641+ sp = slob_page(ptr);
66642+ if (!PageSlab((struct page*)sp)) {
66643+ if (object_is_on_stack(ptr, n) == -1)
66644+ goto report;
66645+ return;
66646+ }
66647+
66648+ if (sp->size) {
66649+ base = page_address(&sp->page);
66650+ if (base <= ptr && n <= sp->size - (ptr - base))
66651+ return;
66652+ goto report;
66653+ }
66654+
66655+ /* some tricky double walking to find the chunk */
66656+ spin_lock_irqsave(&slob_lock, flags);
66657+ base = (void *)((unsigned long)ptr & PAGE_MASK);
66658+ free = sp->free;
66659+
66660+ while (!slob_last(free) && (void *)free <= ptr) {
66661+ base = free + slob_units(free);
66662+ free = slob_next(free);
66663+ }
66664+
66665+ while (base < (void *)free) {
66666+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
66667+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
66668+ int offset;
66669+
66670+ if (ptr < base + align)
66671+ break;
66672+
66673+ offset = ptr - base - align;
66674+ if (offset >= m) {
66675+ base += size;
66676+ continue;
66677+ }
66678+
66679+ if (n > m - offset)
66680+ break;
66681+
66682+ spin_unlock_irqrestore(&slob_lock, flags);
66683+ return;
66684+ }
66685+
66686+ spin_unlock_irqrestore(&slob_lock, flags);
66687+report:
66688+ pax_report_usercopy(ptr, n, to, NULL);
66689+#endif
66690+
66691+}
66692+EXPORT_SYMBOL(check_object_size);
66693+
66694 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
66695 size_t ksize(const void *block)
66696 {
66697@@ -550,10 +639,10 @@ size_t ksize(const void *block)
66698 sp = slob_page(block);
66699 if (is_slob_page(sp)) {
66700 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66701- unsigned int *m = (unsigned int *)(block - align);
66702- return SLOB_UNITS(*m) * SLOB_UNIT;
66703+ slob_t *m = (slob_t *)(block - align);
66704+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
66705 } else
66706- return sp->page.private;
66707+ return sp->size;
66708 }
66709 EXPORT_SYMBOL(ksize);
66710
66711@@ -569,8 +658,13 @@ struct kmem_cache *kmem_cache_create(con
66712 {
66713 struct kmem_cache *c;
66714
66715+#ifdef CONFIG_PAX_USERCOPY
66716+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
66717+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
66718+#else
66719 c = slob_alloc(sizeof(struct kmem_cache),
66720 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
66721+#endif
66722
66723 if (c) {
66724 c->name = name;
66725@@ -608,17 +702,25 @@ void *kmem_cache_alloc_node(struct kmem_
66726 {
66727 void *b;
66728
66729+#ifdef CONFIG_PAX_USERCOPY
66730+ b = __kmalloc_node_align(c->size, flags, node, c->align);
66731+#else
66732 if (c->size < PAGE_SIZE) {
66733 b = slob_alloc(c->size, flags, c->align, node);
66734 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
66735 SLOB_UNITS(c->size) * SLOB_UNIT,
66736 flags, node);
66737 } else {
66738+ struct slob_page *sp;
66739+
66740 b = slob_new_pages(flags, get_order(c->size), node);
66741+ sp = slob_page(b);
66742+ sp->size = c->size;
66743 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
66744 PAGE_SIZE << get_order(c->size),
66745 flags, node);
66746 }
66747+#endif
66748
66749 if (c->ctor)
66750 c->ctor(b);
66751@@ -630,10 +732,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
66752
66753 static void __kmem_cache_free(void *b, int size)
66754 {
66755- if (size < PAGE_SIZE)
66756+ struct slob_page *sp = slob_page(b);
66757+
66758+ if (is_slob_page(sp))
66759 slob_free(b, size);
66760- else
66761+ else {
66762+ clear_slob_page(sp);
66763+ free_slob_page(sp);
66764+ sp->size = 0;
66765 slob_free_pages(b, get_order(size));
66766+ }
66767 }
66768
66769 static void kmem_rcu_free(struct rcu_head *head)
66770@@ -646,17 +754,31 @@ static void kmem_rcu_free(struct rcu_hea
66771
66772 void kmem_cache_free(struct kmem_cache *c, void *b)
66773 {
66774+ int size = c->size;
66775+
66776+#ifdef CONFIG_PAX_USERCOPY
66777+ if (size + c->align < PAGE_SIZE) {
66778+ size += c->align;
66779+ b -= c->align;
66780+ }
66781+#endif
66782+
66783 kmemleak_free_recursive(b, c->flags);
66784 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
66785 struct slob_rcu *slob_rcu;
66786- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
66787- slob_rcu->size = c->size;
66788+ slob_rcu = b + (size - sizeof(struct slob_rcu));
66789+ slob_rcu->size = size;
66790 call_rcu(&slob_rcu->head, kmem_rcu_free);
66791 } else {
66792- __kmem_cache_free(b, c->size);
66793+ __kmem_cache_free(b, size);
66794 }
66795
66796+#ifdef CONFIG_PAX_USERCOPY
66797+ trace_kfree(_RET_IP_, b);
66798+#else
66799 trace_kmem_cache_free(_RET_IP_, b);
66800+#endif
66801+
66802 }
66803 EXPORT_SYMBOL(kmem_cache_free);
66804
66805diff -urNp linux-3.0.4/mm/slub.c linux-3.0.4/mm/slub.c
66806--- linux-3.0.4/mm/slub.c 2011-07-21 22:17:23.000000000 -0400
66807+++ linux-3.0.4/mm/slub.c 2011-08-23 21:48:14.000000000 -0400
66808@@ -442,7 +442,7 @@ static void print_track(const char *s, s
66809 if (!t->addr)
66810 return;
66811
66812- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
66813+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
66814 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
66815 }
66816
66817@@ -2137,6 +2137,8 @@ void kmem_cache_free(struct kmem_cache *
66818
66819 page = virt_to_head_page(x);
66820
66821+ BUG_ON(!PageSlab(page));
66822+
66823 slab_free(s, page, x, _RET_IP_);
66824
66825 trace_kmem_cache_free(_RET_IP_, x);
66826@@ -2170,7 +2172,7 @@ static int slub_min_objects;
66827 * Merge control. If this is set then no merging of slab caches will occur.
66828 * (Could be removed. This was introduced to pacify the merge skeptics.)
66829 */
66830-static int slub_nomerge;
66831+static int slub_nomerge = 1;
66832
66833 /*
66834 * Calculate the order of allocation given an slab object size.
66835@@ -2594,7 +2596,7 @@ static int kmem_cache_open(struct kmem_c
66836 * list to avoid pounding the page allocator excessively.
66837 */
66838 set_min_partial(s, ilog2(s->size));
66839- s->refcount = 1;
66840+ atomic_set(&s->refcount, 1);
66841 #ifdef CONFIG_NUMA
66842 s->remote_node_defrag_ratio = 1000;
66843 #endif
66844@@ -2699,8 +2701,7 @@ static inline int kmem_cache_close(struc
66845 void kmem_cache_destroy(struct kmem_cache *s)
66846 {
66847 down_write(&slub_lock);
66848- s->refcount--;
66849- if (!s->refcount) {
66850+ if (atomic_dec_and_test(&s->refcount)) {
66851 list_del(&s->list);
66852 if (kmem_cache_close(s)) {
66853 printk(KERN_ERR "SLUB %s: %s called for cache that "
66854@@ -2910,6 +2911,46 @@ void *__kmalloc_node(size_t size, gfp_t
66855 EXPORT_SYMBOL(__kmalloc_node);
66856 #endif
66857
66858+void check_object_size(const void *ptr, unsigned long n, bool to)
66859+{
66860+
66861+#ifdef CONFIG_PAX_USERCOPY
66862+ struct page *page;
66863+ struct kmem_cache *s = NULL;
66864+ unsigned long offset;
66865+
66866+ if (!n)
66867+ return;
66868+
66869+ if (ZERO_OR_NULL_PTR(ptr))
66870+ goto report;
66871+
66872+ if (!virt_addr_valid(ptr))
66873+ return;
66874+
66875+ page = virt_to_head_page(ptr);
66876+
66877+ if (!PageSlab(page)) {
66878+ if (object_is_on_stack(ptr, n) == -1)
66879+ goto report;
66880+ return;
66881+ }
66882+
66883+ s = page->slab;
66884+ if (!(s->flags & SLAB_USERCOPY))
66885+ goto report;
66886+
66887+ offset = (ptr - page_address(page)) % s->size;
66888+ if (offset <= s->objsize && n <= s->objsize - offset)
66889+ return;
66890+
66891+report:
66892+ pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
66893+#endif
66894+
66895+}
66896+EXPORT_SYMBOL(check_object_size);
66897+
66898 size_t ksize(const void *object)
66899 {
66900 struct page *page;
66901@@ -3154,7 +3195,7 @@ static void __init kmem_cache_bootstrap_
66902 int node;
66903
66904 list_add(&s->list, &slab_caches);
66905- s->refcount = -1;
66906+ atomic_set(&s->refcount, -1);
66907
66908 for_each_node_state(node, N_NORMAL_MEMORY) {
66909 struct kmem_cache_node *n = get_node(s, node);
66910@@ -3271,17 +3312,17 @@ void __init kmem_cache_init(void)
66911
66912 /* Caches that are not of the two-to-the-power-of size */
66913 if (KMALLOC_MIN_SIZE <= 32) {
66914- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
66915+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
66916 caches++;
66917 }
66918
66919 if (KMALLOC_MIN_SIZE <= 64) {
66920- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
66921+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
66922 caches++;
66923 }
66924
66925 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
66926- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
66927+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
66928 caches++;
66929 }
66930
66931@@ -3349,7 +3390,7 @@ static int slab_unmergeable(struct kmem_
66932 /*
66933 * We may have set a slab to be unmergeable during bootstrap.
66934 */
66935- if (s->refcount < 0)
66936+ if (atomic_read(&s->refcount) < 0)
66937 return 1;
66938
66939 return 0;
66940@@ -3408,7 +3449,7 @@ struct kmem_cache *kmem_cache_create(con
66941 down_write(&slub_lock);
66942 s = find_mergeable(size, align, flags, name, ctor);
66943 if (s) {
66944- s->refcount++;
66945+ atomic_inc(&s->refcount);
66946 /*
66947 * Adjust the object sizes so that we clear
66948 * the complete object on kzalloc.
66949@@ -3417,7 +3458,7 @@ struct kmem_cache *kmem_cache_create(con
66950 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
66951
66952 if (sysfs_slab_alias(s, name)) {
66953- s->refcount--;
66954+ atomic_dec(&s->refcount);
66955 goto err;
66956 }
66957 up_write(&slub_lock);
66958@@ -4150,7 +4191,7 @@ SLAB_ATTR_RO(ctor);
66959
66960 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
66961 {
66962- return sprintf(buf, "%d\n", s->refcount - 1);
66963+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
66964 }
66965 SLAB_ATTR_RO(aliases);
66966
66967@@ -4894,7 +4935,13 @@ static const struct file_operations proc
66968
66969 static int __init slab_proc_init(void)
66970 {
66971- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
66972+ mode_t gr_mode = S_IRUGO;
66973+
66974+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66975+ gr_mode = S_IRUSR;
66976+#endif
66977+
66978+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
66979 return 0;
66980 }
66981 module_init(slab_proc_init);
66982diff -urNp linux-3.0.4/mm/swap.c linux-3.0.4/mm/swap.c
66983--- linux-3.0.4/mm/swap.c 2011-07-21 22:17:23.000000000 -0400
66984+++ linux-3.0.4/mm/swap.c 2011-08-23 21:47:56.000000000 -0400
66985@@ -31,6 +31,7 @@
66986 #include <linux/backing-dev.h>
66987 #include <linux/memcontrol.h>
66988 #include <linux/gfp.h>
66989+#include <linux/hugetlb.h>
66990
66991 #include "internal.h"
66992
66993@@ -71,6 +72,8 @@ static void __put_compound_page(struct p
66994
66995 __page_cache_release(page);
66996 dtor = get_compound_page_dtor(page);
66997+ if (!PageHuge(page))
66998+ BUG_ON(dtor != free_compound_page);
66999 (*dtor)(page);
67000 }
67001
67002diff -urNp linux-3.0.4/mm/swapfile.c linux-3.0.4/mm/swapfile.c
67003--- linux-3.0.4/mm/swapfile.c 2011-07-21 22:17:23.000000000 -0400
67004+++ linux-3.0.4/mm/swapfile.c 2011-08-23 21:47:56.000000000 -0400
67005@@ -62,7 +62,7 @@ static DEFINE_MUTEX(swapon_mutex);
67006
67007 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
67008 /* Activity counter to indicate that a swapon or swapoff has occurred */
67009-static atomic_t proc_poll_event = ATOMIC_INIT(0);
67010+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
67011
67012 static inline unsigned char swap_count(unsigned char ent)
67013 {
67014@@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __us
67015 }
67016 filp_close(swap_file, NULL);
67017 err = 0;
67018- atomic_inc(&proc_poll_event);
67019+ atomic_inc_unchecked(&proc_poll_event);
67020 wake_up_interruptible(&proc_poll_wait);
67021
67022 out_dput:
67023@@ -1692,8 +1692,8 @@ static unsigned swaps_poll(struct file *
67024
67025 poll_wait(file, &proc_poll_wait, wait);
67026
67027- if (s->event != atomic_read(&proc_poll_event)) {
67028- s->event = atomic_read(&proc_poll_event);
67029+ if (s->event != atomic_read_unchecked(&proc_poll_event)) {
67030+ s->event = atomic_read_unchecked(&proc_poll_event);
67031 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
67032 }
67033
67034@@ -1799,7 +1799,7 @@ static int swaps_open(struct inode *inod
67035 }
67036
67037 s->seq.private = s;
67038- s->event = atomic_read(&proc_poll_event);
67039+ s->event = atomic_read_unchecked(&proc_poll_event);
67040 return ret;
67041 }
67042
67043@@ -2133,7 +2133,7 @@ SYSCALL_DEFINE2(swapon, const char __use
67044 (p->flags & SWP_DISCARDABLE) ? "D" : "");
67045
67046 mutex_unlock(&swapon_mutex);
67047- atomic_inc(&proc_poll_event);
67048+ atomic_inc_unchecked(&proc_poll_event);
67049 wake_up_interruptible(&proc_poll_wait);
67050
67051 if (S_ISREG(inode->i_mode))
67052diff -urNp linux-3.0.4/mm/util.c linux-3.0.4/mm/util.c
67053--- linux-3.0.4/mm/util.c 2011-07-21 22:17:23.000000000 -0400
67054+++ linux-3.0.4/mm/util.c 2011-08-23 21:47:56.000000000 -0400
67055@@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
67056 * allocated buffer. Use this if you don't want to free the buffer immediately
67057 * like, for example, with RCU.
67058 */
67059+#undef __krealloc
67060 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
67061 {
67062 void *ret;
67063@@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
67064 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
67065 * %NULL pointer, the object pointed to is freed.
67066 */
67067+#undef krealloc
67068 void *krealloc(const void *p, size_t new_size, gfp_t flags)
67069 {
67070 void *ret;
67071@@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *m
67072 void arch_pick_mmap_layout(struct mm_struct *mm)
67073 {
67074 mm->mmap_base = TASK_UNMAPPED_BASE;
67075+
67076+#ifdef CONFIG_PAX_RANDMMAP
67077+ if (mm->pax_flags & MF_PAX_RANDMMAP)
67078+ mm->mmap_base += mm->delta_mmap;
67079+#endif
67080+
67081 mm->get_unmapped_area = arch_get_unmapped_area;
67082 mm->unmap_area = arch_unmap_area;
67083 }
67084diff -urNp linux-3.0.4/mm/vmalloc.c linux-3.0.4/mm/vmalloc.c
67085--- linux-3.0.4/mm/vmalloc.c 2011-08-23 21:44:40.000000000 -0400
67086+++ linux-3.0.4/mm/vmalloc.c 2011-08-23 21:47:56.000000000 -0400
67087@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd,
67088
67089 pte = pte_offset_kernel(pmd, addr);
67090 do {
67091- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
67092- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
67093+
67094+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67095+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
67096+ BUG_ON(!pte_exec(*pte));
67097+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
67098+ continue;
67099+ }
67100+#endif
67101+
67102+ {
67103+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
67104+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
67105+ }
67106 } while (pte++, addr += PAGE_SIZE, addr != end);
67107 }
67108
67109@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, un
67110 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
67111 {
67112 pte_t *pte;
67113+ int ret = -ENOMEM;
67114
67115 /*
67116 * nr is a running index into the array which helps higher level
67117@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, un
67118 pte = pte_alloc_kernel(pmd, addr);
67119 if (!pte)
67120 return -ENOMEM;
67121+
67122+ pax_open_kernel();
67123 do {
67124 struct page *page = pages[*nr];
67125
67126- if (WARN_ON(!pte_none(*pte)))
67127- return -EBUSY;
67128- if (WARN_ON(!page))
67129- return -ENOMEM;
67130+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67131+ if (pgprot_val(prot) & _PAGE_NX)
67132+#endif
67133+
67134+ if (WARN_ON(!pte_none(*pte))) {
67135+ ret = -EBUSY;
67136+ goto out;
67137+ }
67138+ if (WARN_ON(!page)) {
67139+ ret = -ENOMEM;
67140+ goto out;
67141+ }
67142 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
67143 (*nr)++;
67144 } while (pte++, addr += PAGE_SIZE, addr != end);
67145- return 0;
67146+ ret = 0;
67147+out:
67148+ pax_close_kernel();
67149+ return ret;
67150 }
67151
67152 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
67153@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void
67154 * and fall back on vmalloc() if that fails. Others
67155 * just put it in the vmalloc space.
67156 */
67157-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
67158+#ifdef CONFIG_MODULES
67159+#ifdef MODULES_VADDR
67160 unsigned long addr = (unsigned long)x;
67161 if (addr >= MODULES_VADDR && addr < MODULES_END)
67162 return 1;
67163 #endif
67164+
67165+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67166+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
67167+ return 1;
67168+#endif
67169+
67170+#endif
67171+
67172 return is_vmalloc_addr(x);
67173 }
67174
67175@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void
67176
67177 if (!pgd_none(*pgd)) {
67178 pud_t *pud = pud_offset(pgd, addr);
67179+#ifdef CONFIG_X86
67180+ if (!pud_large(*pud))
67181+#endif
67182 if (!pud_none(*pud)) {
67183 pmd_t *pmd = pmd_offset(pud, addr);
67184+#ifdef CONFIG_X86
67185+ if (!pmd_large(*pmd))
67186+#endif
67187 if (!pmd_none(*pmd)) {
67188 pte_t *ptep, pte;
67189
67190@@ -1297,6 +1337,16 @@ static struct vm_struct *__get_vm_area_n
67191 struct vm_struct *area;
67192
67193 BUG_ON(in_interrupt());
67194+
67195+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
67196+ if (flags & VM_KERNEXEC) {
67197+ if (start != VMALLOC_START || end != VMALLOC_END)
67198+ return NULL;
67199+ start = (unsigned long)MODULES_EXEC_VADDR;
67200+ end = (unsigned long)MODULES_EXEC_END;
67201+ }
67202+#endif
67203+
67204 if (flags & VM_IOREMAP) {
67205 int bit = fls(size);
67206
67207@@ -1515,6 +1565,11 @@ void *vmap(struct page **pages, unsigned
67208 if (count > totalram_pages)
67209 return NULL;
67210
67211+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
67212+ if (!(pgprot_val(prot) & _PAGE_NX))
67213+ flags |= VM_KERNEXEC;
67214+#endif
67215+
67216 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
67217 __builtin_return_address(0));
67218 if (!area)
67219@@ -1616,6 +1671,13 @@ void *__vmalloc_node_range(unsigned long
67220 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
67221 return NULL;
67222
67223+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
67224+ if (!(pgprot_val(prot) & _PAGE_NX))
67225+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
67226+ node, gfp_mask, caller);
67227+ else
67228+#endif
67229+
67230 area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
67231 gfp_mask, caller);
67232
67233@@ -1655,6 +1717,7 @@ static void *__vmalloc_node(unsigned lon
67234 gfp_mask, prot, node, caller);
67235 }
67236
67237+#undef __vmalloc
67238 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
67239 {
67240 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
67241@@ -1678,6 +1741,7 @@ static inline void *__vmalloc_node_flags
67242 * For tight control over page level allocator and protection flags
67243 * use __vmalloc() instead.
67244 */
67245+#undef vmalloc
67246 void *vmalloc(unsigned long size)
67247 {
67248 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
67249@@ -1694,6 +1758,7 @@ EXPORT_SYMBOL(vmalloc);
67250 * For tight control over page level allocator and protection flags
67251 * use __vmalloc() instead.
67252 */
67253+#undef vzalloc
67254 void *vzalloc(unsigned long size)
67255 {
67256 return __vmalloc_node_flags(size, -1,
67257@@ -1708,6 +1773,7 @@ EXPORT_SYMBOL(vzalloc);
67258 * The resulting memory area is zeroed so it can be mapped to userspace
67259 * without leaking data.
67260 */
67261+#undef vmalloc_user
67262 void *vmalloc_user(unsigned long size)
67263 {
67264 struct vm_struct *area;
67265@@ -1735,6 +1801,7 @@ EXPORT_SYMBOL(vmalloc_user);
67266 * For tight control over page level allocator and protection flags
67267 * use __vmalloc() instead.
67268 */
67269+#undef vmalloc_node
67270 void *vmalloc_node(unsigned long size, int node)
67271 {
67272 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
67273@@ -1754,6 +1821,7 @@ EXPORT_SYMBOL(vmalloc_node);
67274 * For tight control over page level allocator and protection flags
67275 * use __vmalloc_node() instead.
67276 */
67277+#undef vzalloc_node
67278 void *vzalloc_node(unsigned long size, int node)
67279 {
67280 return __vmalloc_node_flags(size, node,
67281@@ -1776,10 +1844,10 @@ EXPORT_SYMBOL(vzalloc_node);
67282 * For tight control over page level allocator and protection flags
67283 * use __vmalloc() instead.
67284 */
67285-
67286+#undef vmalloc_exec
67287 void *vmalloc_exec(unsigned long size)
67288 {
67289- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
67290+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
67291 -1, __builtin_return_address(0));
67292 }
67293
67294@@ -1798,6 +1866,7 @@ void *vmalloc_exec(unsigned long size)
67295 * Allocate enough 32bit PA addressable pages to cover @size from the
67296 * page level allocator and map them into contiguous kernel virtual space.
67297 */
67298+#undef vmalloc_32
67299 void *vmalloc_32(unsigned long size)
67300 {
67301 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
67302@@ -1812,6 +1881,7 @@ EXPORT_SYMBOL(vmalloc_32);
67303 * The resulting memory area is 32bit addressable and zeroed so it can be
67304 * mapped to userspace without leaking data.
67305 */
67306+#undef vmalloc_32_user
67307 void *vmalloc_32_user(unsigned long size)
67308 {
67309 struct vm_struct *area;
67310@@ -2074,6 +2144,8 @@ int remap_vmalloc_range(struct vm_area_s
67311 unsigned long uaddr = vma->vm_start;
67312 unsigned long usize = vma->vm_end - vma->vm_start;
67313
67314+ BUG_ON(vma->vm_mirror);
67315+
67316 if ((PAGE_SIZE-1) & (unsigned long)addr)
67317 return -EINVAL;
67318
67319diff -urNp linux-3.0.4/mm/vmstat.c linux-3.0.4/mm/vmstat.c
67320--- linux-3.0.4/mm/vmstat.c 2011-07-21 22:17:23.000000000 -0400
67321+++ linux-3.0.4/mm/vmstat.c 2011-08-23 21:48:14.000000000 -0400
67322@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
67323 *
67324 * vm_stat contains the global counters
67325 */
67326-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
67327+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
67328 EXPORT_SYMBOL(vm_stat);
67329
67330 #ifdef CONFIG_SMP
67331@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
67332 v = p->vm_stat_diff[i];
67333 p->vm_stat_diff[i] = 0;
67334 local_irq_restore(flags);
67335- atomic_long_add(v, &zone->vm_stat[i]);
67336+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
67337 global_diff[i] += v;
67338 #ifdef CONFIG_NUMA
67339 /* 3 seconds idle till flush */
67340@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
67341
67342 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
67343 if (global_diff[i])
67344- atomic_long_add(global_diff[i], &vm_stat[i]);
67345+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
67346 }
67347
67348 #endif
67349@@ -1207,10 +1207,20 @@ static int __init setup_vmstat(void)
67350 start_cpu_timer(cpu);
67351 #endif
67352 #ifdef CONFIG_PROC_FS
67353- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
67354- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
67355- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
67356- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
67357+ {
67358+ mode_t gr_mode = S_IRUGO;
67359+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67360+ gr_mode = S_IRUSR;
67361+#endif
67362+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
67363+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
67364+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67365+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
67366+#else
67367+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
67368+#endif
67369+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
67370+ }
67371 #endif
67372 return 0;
67373 }
67374diff -urNp linux-3.0.4/net/8021q/vlan.c linux-3.0.4/net/8021q/vlan.c
67375--- linux-3.0.4/net/8021q/vlan.c 2011-07-21 22:17:23.000000000 -0400
67376+++ linux-3.0.4/net/8021q/vlan.c 2011-08-23 21:47:56.000000000 -0400
67377@@ -591,8 +591,7 @@ static int vlan_ioctl_handler(struct net
67378 err = -EPERM;
67379 if (!capable(CAP_NET_ADMIN))
67380 break;
67381- if ((args.u.name_type >= 0) &&
67382- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
67383+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
67384 struct vlan_net *vn;
67385
67386 vn = net_generic(net, vlan_net_id);
67387diff -urNp linux-3.0.4/net/atm/atm_misc.c linux-3.0.4/net/atm/atm_misc.c
67388--- linux-3.0.4/net/atm/atm_misc.c 2011-07-21 22:17:23.000000000 -0400
67389+++ linux-3.0.4/net/atm/atm_misc.c 2011-08-23 21:47:56.000000000 -0400
67390@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
67391 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
67392 return 1;
67393 atm_return(vcc, truesize);
67394- atomic_inc(&vcc->stats->rx_drop);
67395+ atomic_inc_unchecked(&vcc->stats->rx_drop);
67396 return 0;
67397 }
67398 EXPORT_SYMBOL(atm_charge);
67399@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
67400 }
67401 }
67402 atm_return(vcc, guess);
67403- atomic_inc(&vcc->stats->rx_drop);
67404+ atomic_inc_unchecked(&vcc->stats->rx_drop);
67405 return NULL;
67406 }
67407 EXPORT_SYMBOL(atm_alloc_charge);
67408@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
67409
67410 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
67411 {
67412-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
67413+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
67414 __SONET_ITEMS
67415 #undef __HANDLE_ITEM
67416 }
67417@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
67418
67419 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
67420 {
67421-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
67422+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
67423 __SONET_ITEMS
67424 #undef __HANDLE_ITEM
67425 }
67426diff -urNp linux-3.0.4/net/atm/lec.h linux-3.0.4/net/atm/lec.h
67427--- linux-3.0.4/net/atm/lec.h 2011-07-21 22:17:23.000000000 -0400
67428+++ linux-3.0.4/net/atm/lec.h 2011-08-23 21:47:56.000000000 -0400
67429@@ -48,7 +48,7 @@ struct lane2_ops {
67430 const u8 *tlvs, u32 sizeoftlvs);
67431 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
67432 const u8 *tlvs, u32 sizeoftlvs);
67433-};
67434+} __no_const;
67435
67436 /*
67437 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
67438diff -urNp linux-3.0.4/net/atm/mpc.h linux-3.0.4/net/atm/mpc.h
67439--- linux-3.0.4/net/atm/mpc.h 2011-07-21 22:17:23.000000000 -0400
67440+++ linux-3.0.4/net/atm/mpc.h 2011-08-23 21:47:56.000000000 -0400
67441@@ -33,7 +33,7 @@ struct mpoa_client {
67442 struct mpc_parameters parameters; /* parameters for this client */
67443
67444 const struct net_device_ops *old_ops;
67445- struct net_device_ops new_ops;
67446+ net_device_ops_no_const new_ops;
67447 };
67448
67449
67450diff -urNp linux-3.0.4/net/atm/mpoa_caches.c linux-3.0.4/net/atm/mpoa_caches.c
67451--- linux-3.0.4/net/atm/mpoa_caches.c 2011-07-21 22:17:23.000000000 -0400
67452+++ linux-3.0.4/net/atm/mpoa_caches.c 2011-08-23 21:48:14.000000000 -0400
67453@@ -255,6 +255,8 @@ static void check_resolving_entries(stru
67454 struct timeval now;
67455 struct k_message msg;
67456
67457+ pax_track_stack();
67458+
67459 do_gettimeofday(&now);
67460
67461 read_lock_bh(&client->ingress_lock);
67462diff -urNp linux-3.0.4/net/atm/proc.c linux-3.0.4/net/atm/proc.c
67463--- linux-3.0.4/net/atm/proc.c 2011-07-21 22:17:23.000000000 -0400
67464+++ linux-3.0.4/net/atm/proc.c 2011-08-23 21:47:56.000000000 -0400
67465@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *s
67466 const struct k_atm_aal_stats *stats)
67467 {
67468 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
67469- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
67470- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
67471- atomic_read(&stats->rx_drop));
67472+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
67473+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
67474+ atomic_read_unchecked(&stats->rx_drop));
67475 }
67476
67477 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
67478diff -urNp linux-3.0.4/net/atm/resources.c linux-3.0.4/net/atm/resources.c
67479--- linux-3.0.4/net/atm/resources.c 2011-07-21 22:17:23.000000000 -0400
67480+++ linux-3.0.4/net/atm/resources.c 2011-08-23 21:47:56.000000000 -0400
67481@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
67482 static void copy_aal_stats(struct k_atm_aal_stats *from,
67483 struct atm_aal_stats *to)
67484 {
67485-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
67486+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
67487 __AAL_STAT_ITEMS
67488 #undef __HANDLE_ITEM
67489 }
67490@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_
67491 static void subtract_aal_stats(struct k_atm_aal_stats *from,
67492 struct atm_aal_stats *to)
67493 {
67494-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
67495+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
67496 __AAL_STAT_ITEMS
67497 #undef __HANDLE_ITEM
67498 }
67499diff -urNp linux-3.0.4/net/batman-adv/hard-interface.c linux-3.0.4/net/batman-adv/hard-interface.c
67500--- linux-3.0.4/net/batman-adv/hard-interface.c 2011-07-21 22:17:23.000000000 -0400
67501+++ linux-3.0.4/net/batman-adv/hard-interface.c 2011-08-23 21:47:56.000000000 -0400
67502@@ -351,8 +351,8 @@ int hardif_enable_interface(struct hard_
67503 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
67504 dev_add_pack(&hard_iface->batman_adv_ptype);
67505
67506- atomic_set(&hard_iface->seqno, 1);
67507- atomic_set(&hard_iface->frag_seqno, 1);
67508+ atomic_set_unchecked(&hard_iface->seqno, 1);
67509+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
67510 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
67511 hard_iface->net_dev->name);
67512
67513diff -urNp linux-3.0.4/net/batman-adv/routing.c linux-3.0.4/net/batman-adv/routing.c
67514--- linux-3.0.4/net/batman-adv/routing.c 2011-07-21 22:17:23.000000000 -0400
67515+++ linux-3.0.4/net/batman-adv/routing.c 2011-08-23 21:47:56.000000000 -0400
67516@@ -627,7 +627,7 @@ void receive_bat_packet(struct ethhdr *e
67517 return;
67518
67519 /* could be changed by schedule_own_packet() */
67520- if_incoming_seqno = atomic_read(&if_incoming->seqno);
67521+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
67522
67523 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
67524
67525diff -urNp linux-3.0.4/net/batman-adv/send.c linux-3.0.4/net/batman-adv/send.c
67526--- linux-3.0.4/net/batman-adv/send.c 2011-07-21 22:17:23.000000000 -0400
67527+++ linux-3.0.4/net/batman-adv/send.c 2011-08-23 21:47:56.000000000 -0400
67528@@ -279,7 +279,7 @@ void schedule_own_packet(struct hard_ifa
67529
67530 /* change sequence number to network order */
67531 batman_packet->seqno =
67532- htonl((uint32_t)atomic_read(&hard_iface->seqno));
67533+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
67534
67535 if (vis_server == VIS_TYPE_SERVER_SYNC)
67536 batman_packet->flags |= VIS_SERVER;
67537@@ -293,7 +293,7 @@ void schedule_own_packet(struct hard_ifa
67538 else
67539 batman_packet->gw_flags = 0;
67540
67541- atomic_inc(&hard_iface->seqno);
67542+ atomic_inc_unchecked(&hard_iface->seqno);
67543
67544 slide_own_bcast_window(hard_iface);
67545 send_time = own_send_time(bat_priv);
67546diff -urNp linux-3.0.4/net/batman-adv/soft-interface.c linux-3.0.4/net/batman-adv/soft-interface.c
67547--- linux-3.0.4/net/batman-adv/soft-interface.c 2011-07-21 22:17:23.000000000 -0400
67548+++ linux-3.0.4/net/batman-adv/soft-interface.c 2011-08-23 21:47:56.000000000 -0400
67549@@ -628,7 +628,7 @@ int interface_tx(struct sk_buff *skb, st
67550
67551 /* set broadcast sequence number */
67552 bcast_packet->seqno =
67553- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
67554+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
67555
67556 add_bcast_packet_to_list(bat_priv, skb);
67557
67558@@ -830,7 +830,7 @@ struct net_device *softif_create(char *n
67559 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
67560
67561 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
67562- atomic_set(&bat_priv->bcast_seqno, 1);
67563+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
67564 atomic_set(&bat_priv->tt_local_changed, 0);
67565
67566 bat_priv->primary_if = NULL;
67567diff -urNp linux-3.0.4/net/batman-adv/types.h linux-3.0.4/net/batman-adv/types.h
67568--- linux-3.0.4/net/batman-adv/types.h 2011-07-21 22:17:23.000000000 -0400
67569+++ linux-3.0.4/net/batman-adv/types.h 2011-08-23 21:47:56.000000000 -0400
67570@@ -38,8 +38,8 @@ struct hard_iface {
67571 int16_t if_num;
67572 char if_status;
67573 struct net_device *net_dev;
67574- atomic_t seqno;
67575- atomic_t frag_seqno;
67576+ atomic_unchecked_t seqno;
67577+ atomic_unchecked_t frag_seqno;
67578 unsigned char *packet_buff;
67579 int packet_len;
67580 struct kobject *hardif_obj;
67581@@ -142,7 +142,7 @@ struct bat_priv {
67582 atomic_t orig_interval; /* uint */
67583 atomic_t hop_penalty; /* uint */
67584 atomic_t log_level; /* uint */
67585- atomic_t bcast_seqno;
67586+ atomic_unchecked_t bcast_seqno;
67587 atomic_t bcast_queue_left;
67588 atomic_t batman_queue_left;
67589 char num_ifaces;
67590diff -urNp linux-3.0.4/net/batman-adv/unicast.c linux-3.0.4/net/batman-adv/unicast.c
67591--- linux-3.0.4/net/batman-adv/unicast.c 2011-07-21 22:17:23.000000000 -0400
67592+++ linux-3.0.4/net/batman-adv/unicast.c 2011-08-23 21:47:56.000000000 -0400
67593@@ -265,7 +265,7 @@ int frag_send_skb(struct sk_buff *skb, s
67594 frag1->flags = UNI_FRAG_HEAD | large_tail;
67595 frag2->flags = large_tail;
67596
67597- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
67598+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
67599 frag1->seqno = htons(seqno - 1);
67600 frag2->seqno = htons(seqno);
67601
67602diff -urNp linux-3.0.4/net/bridge/br_multicast.c linux-3.0.4/net/bridge/br_multicast.c
67603--- linux-3.0.4/net/bridge/br_multicast.c 2011-07-21 22:17:23.000000000 -0400
67604+++ linux-3.0.4/net/bridge/br_multicast.c 2011-08-23 21:47:56.000000000 -0400
67605@@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct
67606 nexthdr = ip6h->nexthdr;
67607 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
67608
67609- if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
67610+ if (nexthdr != IPPROTO_ICMPV6)
67611 return 0;
67612
67613 /* Okay, we found ICMPv6 header */
67614diff -urNp linux-3.0.4/net/bridge/netfilter/ebtables.c linux-3.0.4/net/bridge/netfilter/ebtables.c
67615--- linux-3.0.4/net/bridge/netfilter/ebtables.c 2011-07-21 22:17:23.000000000 -0400
67616+++ linux-3.0.4/net/bridge/netfilter/ebtables.c 2011-08-23 21:48:14.000000000 -0400
67617@@ -1512,7 +1512,7 @@ static int do_ebt_get_ctl(struct sock *s
67618 tmp.valid_hooks = t->table->valid_hooks;
67619 }
67620 mutex_unlock(&ebt_mutex);
67621- if (copy_to_user(user, &tmp, *len) != 0){
67622+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
67623 BUGPRINT("c2u Didn't work\n");
67624 ret = -EFAULT;
67625 break;
67626@@ -1780,6 +1780,8 @@ static int compat_copy_everything_to_use
67627 int ret;
67628 void __user *pos;
67629
67630+ pax_track_stack();
67631+
67632 memset(&tinfo, 0, sizeof(tinfo));
67633
67634 if (cmd == EBT_SO_GET_ENTRIES) {
67635diff -urNp linux-3.0.4/net/caif/caif_socket.c linux-3.0.4/net/caif/caif_socket.c
67636--- linux-3.0.4/net/caif/caif_socket.c 2011-07-21 22:17:23.000000000 -0400
67637+++ linux-3.0.4/net/caif/caif_socket.c 2011-08-23 21:47:56.000000000 -0400
67638@@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
67639 #ifdef CONFIG_DEBUG_FS
67640 struct debug_fs_counter {
67641 atomic_t caif_nr_socks;
67642- atomic_t caif_sock_create;
67643- atomic_t num_connect_req;
67644- atomic_t num_connect_resp;
67645- atomic_t num_connect_fail_resp;
67646- atomic_t num_disconnect;
67647- atomic_t num_remote_shutdown_ind;
67648- atomic_t num_tx_flow_off_ind;
67649- atomic_t num_tx_flow_on_ind;
67650- atomic_t num_rx_flow_off;
67651- atomic_t num_rx_flow_on;
67652+ atomic_unchecked_t caif_sock_create;
67653+ atomic_unchecked_t num_connect_req;
67654+ atomic_unchecked_t num_connect_resp;
67655+ atomic_unchecked_t num_connect_fail_resp;
67656+ atomic_unchecked_t num_disconnect;
67657+ atomic_unchecked_t num_remote_shutdown_ind;
67658+ atomic_unchecked_t num_tx_flow_off_ind;
67659+ atomic_unchecked_t num_tx_flow_on_ind;
67660+ atomic_unchecked_t num_rx_flow_off;
67661+ atomic_unchecked_t num_rx_flow_on;
67662 };
67663 static struct debug_fs_counter cnt;
67664 #define dbfs_atomic_inc(v) atomic_inc_return(v)
67665+#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
67666 #define dbfs_atomic_dec(v) atomic_dec_return(v)
67667 #else
67668 #define dbfs_atomic_inc(v) 0
67669@@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct soc
67670 atomic_read(&cf_sk->sk.sk_rmem_alloc),
67671 sk_rcvbuf_lowwater(cf_sk));
67672 set_rx_flow_off(cf_sk);
67673- dbfs_atomic_inc(&cnt.num_rx_flow_off);
67674+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
67675 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
67676 }
67677
67678@@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct soc
67679 set_rx_flow_off(cf_sk);
67680 if (net_ratelimit())
67681 pr_debug("sending flow OFF due to rmem_schedule\n");
67682- dbfs_atomic_inc(&cnt.num_rx_flow_off);
67683+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
67684 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
67685 }
67686 skb->dev = NULL;
67687@@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer
67688 switch (flow) {
67689 case CAIF_CTRLCMD_FLOW_ON_IND:
67690 /* OK from modem to start sending again */
67691- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
67692+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
67693 set_tx_flow_on(cf_sk);
67694 cf_sk->sk.sk_state_change(&cf_sk->sk);
67695 break;
67696
67697 case CAIF_CTRLCMD_FLOW_OFF_IND:
67698 /* Modem asks us to shut up */
67699- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
67700+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
67701 set_tx_flow_off(cf_sk);
67702 cf_sk->sk.sk_state_change(&cf_sk->sk);
67703 break;
67704@@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer
67705 /* We're now connected */
67706 caif_client_register_refcnt(&cf_sk->layer,
67707 cfsk_hold, cfsk_put);
67708- dbfs_atomic_inc(&cnt.num_connect_resp);
67709+ dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
67710 cf_sk->sk.sk_state = CAIF_CONNECTED;
67711 set_tx_flow_on(cf_sk);
67712 cf_sk->sk.sk_state_change(&cf_sk->sk);
67713@@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer
67714
67715 case CAIF_CTRLCMD_INIT_FAIL_RSP:
67716 /* Connect request failed */
67717- dbfs_atomic_inc(&cnt.num_connect_fail_resp);
67718+ dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
67719 cf_sk->sk.sk_err = ECONNREFUSED;
67720 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
67721 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
67722@@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer
67723
67724 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
67725 /* Modem has closed this connection, or device is down. */
67726- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
67727+ dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
67728 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
67729 cf_sk->sk.sk_err = ECONNRESET;
67730 set_rx_flow_on(cf_sk);
67731@@ -297,7 +298,7 @@ static void caif_check_flow_release(stru
67732 return;
67733
67734 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
67735- dbfs_atomic_inc(&cnt.num_rx_flow_on);
67736+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
67737 set_rx_flow_on(cf_sk);
67738 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
67739 }
67740@@ -854,7 +855,7 @@ static int caif_connect(struct socket *s
67741 /*ifindex = id of the interface.*/
67742 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
67743
67744- dbfs_atomic_inc(&cnt.num_connect_req);
67745+ dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
67746 cf_sk->layer.receive = caif_sktrecv_cb;
67747
67748 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
67749@@ -943,7 +944,7 @@ static int caif_release(struct socket *s
67750 spin_unlock_bh(&sk->sk_receive_queue.lock);
67751 sock->sk = NULL;
67752
67753- dbfs_atomic_inc(&cnt.num_disconnect);
67754+ dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
67755
67756 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
67757 if (cf_sk->debugfs_socket_dir != NULL)
67758@@ -1122,7 +1123,7 @@ static int caif_create(struct net *net,
67759 cf_sk->conn_req.protocol = protocol;
67760 /* Increase the number of sockets created. */
67761 dbfs_atomic_inc(&cnt.caif_nr_socks);
67762- num = dbfs_atomic_inc(&cnt.caif_sock_create);
67763+ num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
67764 #ifdef CONFIG_DEBUG_FS
67765 if (!IS_ERR(debugfsdir)) {
67766
67767diff -urNp linux-3.0.4/net/caif/cfctrl.c linux-3.0.4/net/caif/cfctrl.c
67768--- linux-3.0.4/net/caif/cfctrl.c 2011-07-21 22:17:23.000000000 -0400
67769+++ linux-3.0.4/net/caif/cfctrl.c 2011-08-23 21:48:14.000000000 -0400
67770@@ -9,6 +9,7 @@
67771 #include <linux/stddef.h>
67772 #include <linux/spinlock.h>
67773 #include <linux/slab.h>
67774+#include <linux/sched.h>
67775 #include <net/caif/caif_layer.h>
67776 #include <net/caif/cfpkt.h>
67777 #include <net/caif/cfctrl.h>
67778@@ -45,8 +46,8 @@ struct cflayer *cfctrl_create(void)
67779 dev_info.id = 0xff;
67780 memset(this, 0, sizeof(*this));
67781 cfsrvl_init(&this->serv, 0, &dev_info, false);
67782- atomic_set(&this->req_seq_no, 1);
67783- atomic_set(&this->rsp_seq_no, 1);
67784+ atomic_set_unchecked(&this->req_seq_no, 1);
67785+ atomic_set_unchecked(&this->rsp_seq_no, 1);
67786 this->serv.layer.receive = cfctrl_recv;
67787 sprintf(this->serv.layer.name, "ctrl");
67788 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
67789@@ -132,8 +133,8 @@ static void cfctrl_insert_req(struct cfc
67790 struct cfctrl_request_info *req)
67791 {
67792 spin_lock_bh(&ctrl->info_list_lock);
67793- atomic_inc(&ctrl->req_seq_no);
67794- req->sequence_no = atomic_read(&ctrl->req_seq_no);
67795+ atomic_inc_unchecked(&ctrl->req_seq_no);
67796+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
67797 list_add_tail(&req->list, &ctrl->list);
67798 spin_unlock_bh(&ctrl->info_list_lock);
67799 }
67800@@ -151,7 +152,7 @@ static struct cfctrl_request_info *cfctr
67801 if (p != first)
67802 pr_warn("Requests are not received in order\n");
67803
67804- atomic_set(&ctrl->rsp_seq_no,
67805+ atomic_set_unchecked(&ctrl->rsp_seq_no,
67806 p->sequence_no);
67807 list_del(&p->list);
67808 goto out;
67809@@ -364,6 +365,7 @@ static int cfctrl_recv(struct cflayer *l
67810 struct cfctrl *cfctrl = container_obj(layer);
67811 struct cfctrl_request_info rsp, *req;
67812
67813+ pax_track_stack();
67814
67815 cfpkt_extr_head(pkt, &cmdrsp, 1);
67816 cmd = cmdrsp & CFCTRL_CMD_MASK;
67817diff -urNp linux-3.0.4/net/core/datagram.c linux-3.0.4/net/core/datagram.c
67818--- linux-3.0.4/net/core/datagram.c 2011-07-21 22:17:23.000000000 -0400
67819+++ linux-3.0.4/net/core/datagram.c 2011-08-23 21:47:56.000000000 -0400
67820@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, s
67821 }
67822
67823 kfree_skb(skb);
67824- atomic_inc(&sk->sk_drops);
67825+ atomic_inc_unchecked(&sk->sk_drops);
67826 sk_mem_reclaim_partial(sk);
67827
67828 return err;
67829diff -urNp linux-3.0.4/net/core/dev.c linux-3.0.4/net/core/dev.c
67830--- linux-3.0.4/net/core/dev.c 2011-07-21 22:17:23.000000000 -0400
67831+++ linux-3.0.4/net/core/dev.c 2011-08-23 21:48:14.000000000 -0400
67832@@ -1125,10 +1125,14 @@ void dev_load(struct net *net, const cha
67833 if (no_module && capable(CAP_NET_ADMIN))
67834 no_module = request_module("netdev-%s", name);
67835 if (no_module && capable(CAP_SYS_MODULE)) {
67836+#ifdef CONFIG_GRKERNSEC_MODHARDEN
67837+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
67838+#else
67839 if (!request_module("%s", name))
67840 pr_err("Loading kernel module for a network device "
67841 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
67842 "instead\n", name);
67843+#endif
67844 }
67845 }
67846 EXPORT_SYMBOL(dev_load);
67847@@ -1959,7 +1963,7 @@ static int illegal_highdma(struct net_de
67848
67849 struct dev_gso_cb {
67850 void (*destructor)(struct sk_buff *skb);
67851-};
67852+} __no_const;
67853
67854 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
67855
67856@@ -2912,7 +2916,7 @@ int netif_rx_ni(struct sk_buff *skb)
67857 }
67858 EXPORT_SYMBOL(netif_rx_ni);
67859
67860-static void net_tx_action(struct softirq_action *h)
67861+static void net_tx_action(void)
67862 {
67863 struct softnet_data *sd = &__get_cpu_var(softnet_data);
67864
67865@@ -3761,7 +3765,7 @@ void netif_napi_del(struct napi_struct *
67866 }
67867 EXPORT_SYMBOL(netif_napi_del);
67868
67869-static void net_rx_action(struct softirq_action *h)
67870+static void net_rx_action(void)
67871 {
67872 struct softnet_data *sd = &__get_cpu_var(softnet_data);
67873 unsigned long time_limit = jiffies + 2;
67874diff -urNp linux-3.0.4/net/core/flow.c linux-3.0.4/net/core/flow.c
67875--- linux-3.0.4/net/core/flow.c 2011-07-21 22:17:23.000000000 -0400
67876+++ linux-3.0.4/net/core/flow.c 2011-08-23 21:47:56.000000000 -0400
67877@@ -60,7 +60,7 @@ struct flow_cache {
67878 struct timer_list rnd_timer;
67879 };
67880
67881-atomic_t flow_cache_genid = ATOMIC_INIT(0);
67882+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
67883 EXPORT_SYMBOL(flow_cache_genid);
67884 static struct flow_cache flow_cache_global;
67885 static struct kmem_cache *flow_cachep __read_mostly;
67886@@ -85,7 +85,7 @@ static void flow_cache_new_hashrnd(unsig
67887
67888 static int flow_entry_valid(struct flow_cache_entry *fle)
67889 {
67890- if (atomic_read(&flow_cache_genid) != fle->genid)
67891+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
67892 return 0;
67893 if (fle->object && !fle->object->ops->check(fle->object))
67894 return 0;
67895@@ -253,7 +253,7 @@ flow_cache_lookup(struct net *net, const
67896 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
67897 fcp->hash_count++;
67898 }
67899- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
67900+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
67901 flo = fle->object;
67902 if (!flo)
67903 goto ret_object;
67904@@ -274,7 +274,7 @@ nocache:
67905 }
67906 flo = resolver(net, key, family, dir, flo, ctx);
67907 if (fle) {
67908- fle->genid = atomic_read(&flow_cache_genid);
67909+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
67910 if (!IS_ERR(flo))
67911 fle->object = flo;
67912 else
67913diff -urNp linux-3.0.4/net/core/rtnetlink.c linux-3.0.4/net/core/rtnetlink.c
67914--- linux-3.0.4/net/core/rtnetlink.c 2011-07-21 22:17:23.000000000 -0400
67915+++ linux-3.0.4/net/core/rtnetlink.c 2011-08-23 21:47:56.000000000 -0400
67916@@ -56,7 +56,7 @@
67917 struct rtnl_link {
67918 rtnl_doit_func doit;
67919 rtnl_dumpit_func dumpit;
67920-};
67921+} __no_const;
67922
67923 static DEFINE_MUTEX(rtnl_mutex);
67924
67925diff -urNp linux-3.0.4/net/core/skbuff.c linux-3.0.4/net/core/skbuff.c
67926--- linux-3.0.4/net/core/skbuff.c 2011-07-21 22:17:23.000000000 -0400
67927+++ linux-3.0.4/net/core/skbuff.c 2011-08-23 21:48:14.000000000 -0400
67928@@ -1543,6 +1543,8 @@ int skb_splice_bits(struct sk_buff *skb,
67929 struct sock *sk = skb->sk;
67930 int ret = 0;
67931
67932+ pax_track_stack();
67933+
67934 if (splice_grow_spd(pipe, &spd))
67935 return -ENOMEM;
67936
67937diff -urNp linux-3.0.4/net/core/sock.c linux-3.0.4/net/core/sock.c
67938--- linux-3.0.4/net/core/sock.c 2011-07-21 22:17:23.000000000 -0400
67939+++ linux-3.0.4/net/core/sock.c 2011-08-23 21:48:14.000000000 -0400
67940@@ -291,7 +291,7 @@ int sock_queue_rcv_skb(struct sock *sk,
67941 */
67942 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
67943 (unsigned)sk->sk_rcvbuf) {
67944- atomic_inc(&sk->sk_drops);
67945+ atomic_inc_unchecked(&sk->sk_drops);
67946 return -ENOMEM;
67947 }
67948
67949@@ -300,7 +300,7 @@ int sock_queue_rcv_skb(struct sock *sk,
67950 return err;
67951
67952 if (!sk_rmem_schedule(sk, skb->truesize)) {
67953- atomic_inc(&sk->sk_drops);
67954+ atomic_inc_unchecked(&sk->sk_drops);
67955 return -ENOBUFS;
67956 }
67957
67958@@ -320,7 +320,7 @@ int sock_queue_rcv_skb(struct sock *sk,
67959 skb_dst_force(skb);
67960
67961 spin_lock_irqsave(&list->lock, flags);
67962- skb->dropcount = atomic_read(&sk->sk_drops);
67963+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
67964 __skb_queue_tail(list, skb);
67965 spin_unlock_irqrestore(&list->lock, flags);
67966
67967@@ -340,7 +340,7 @@ int sk_receive_skb(struct sock *sk, stru
67968 skb->dev = NULL;
67969
67970 if (sk_rcvqueues_full(sk, skb)) {
67971- atomic_inc(&sk->sk_drops);
67972+ atomic_inc_unchecked(&sk->sk_drops);
67973 goto discard_and_relse;
67974 }
67975 if (nested)
67976@@ -358,7 +358,7 @@ int sk_receive_skb(struct sock *sk, stru
67977 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
67978 } else if (sk_add_backlog(sk, skb)) {
67979 bh_unlock_sock(sk);
67980- atomic_inc(&sk->sk_drops);
67981+ atomic_inc_unchecked(&sk->sk_drops);
67982 goto discard_and_relse;
67983 }
67984
67985@@ -921,7 +921,7 @@ int sock_getsockopt(struct socket *sock,
67986 if (len > sizeof(peercred))
67987 len = sizeof(peercred);
67988 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
67989- if (copy_to_user(optval, &peercred, len))
67990+ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
67991 return -EFAULT;
67992 goto lenout;
67993 }
67994@@ -934,7 +934,7 @@ int sock_getsockopt(struct socket *sock,
67995 return -ENOTCONN;
67996 if (lv < len)
67997 return -EINVAL;
67998- if (copy_to_user(optval, address, len))
67999+ if (len > sizeof(address) || copy_to_user(optval, address, len))
68000 return -EFAULT;
68001 goto lenout;
68002 }
68003@@ -967,7 +967,7 @@ int sock_getsockopt(struct socket *sock,
68004
68005 if (len > lv)
68006 len = lv;
68007- if (copy_to_user(optval, &v, len))
68008+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
68009 return -EFAULT;
68010 lenout:
68011 if (put_user(len, optlen))
68012@@ -2023,7 +2023,7 @@ void sock_init_data(struct socket *sock,
68013 */
68014 smp_wmb();
68015 atomic_set(&sk->sk_refcnt, 1);
68016- atomic_set(&sk->sk_drops, 0);
68017+ atomic_set_unchecked(&sk->sk_drops, 0);
68018 }
68019 EXPORT_SYMBOL(sock_init_data);
68020
68021diff -urNp linux-3.0.4/net/decnet/sysctl_net_decnet.c linux-3.0.4/net/decnet/sysctl_net_decnet.c
68022--- linux-3.0.4/net/decnet/sysctl_net_decnet.c 2011-07-21 22:17:23.000000000 -0400
68023+++ linux-3.0.4/net/decnet/sysctl_net_decnet.c 2011-08-23 21:47:56.000000000 -0400
68024@@ -173,7 +173,7 @@ static int dn_node_address_handler(ctl_t
68025
68026 if (len > *lenp) len = *lenp;
68027
68028- if (copy_to_user(buffer, addr, len))
68029+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
68030 return -EFAULT;
68031
68032 *lenp = len;
68033@@ -236,7 +236,7 @@ static int dn_def_dev_handler(ctl_table
68034
68035 if (len > *lenp) len = *lenp;
68036
68037- if (copy_to_user(buffer, devname, len))
68038+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
68039 return -EFAULT;
68040
68041 *lenp = len;
68042diff -urNp linux-3.0.4/net/econet/Kconfig linux-3.0.4/net/econet/Kconfig
68043--- linux-3.0.4/net/econet/Kconfig 2011-07-21 22:17:23.000000000 -0400
68044+++ linux-3.0.4/net/econet/Kconfig 2011-08-23 21:48:14.000000000 -0400
68045@@ -4,7 +4,7 @@
68046
68047 config ECONET
68048 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
68049- depends on EXPERIMENTAL && INET
68050+ depends on EXPERIMENTAL && INET && BROKEN
68051 ---help---
68052 Econet is a fairly old and slow networking protocol mainly used by
68053 Acorn computers to access file and print servers. It uses native
68054diff -urNp linux-3.0.4/net/ipv4/fib_frontend.c linux-3.0.4/net/ipv4/fib_frontend.c
68055--- linux-3.0.4/net/ipv4/fib_frontend.c 2011-07-21 22:17:23.000000000 -0400
68056+++ linux-3.0.4/net/ipv4/fib_frontend.c 2011-08-23 21:47:56.000000000 -0400
68057@@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct not
68058 #ifdef CONFIG_IP_ROUTE_MULTIPATH
68059 fib_sync_up(dev);
68060 #endif
68061- atomic_inc(&net->ipv4.dev_addr_genid);
68062+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
68063 rt_cache_flush(dev_net(dev), -1);
68064 break;
68065 case NETDEV_DOWN:
68066 fib_del_ifaddr(ifa, NULL);
68067- atomic_inc(&net->ipv4.dev_addr_genid);
68068+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
68069 if (ifa->ifa_dev->ifa_list == NULL) {
68070 /* Last address was deleted from this interface.
68071 * Disable IP.
68072@@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notif
68073 #ifdef CONFIG_IP_ROUTE_MULTIPATH
68074 fib_sync_up(dev);
68075 #endif
68076- atomic_inc(&net->ipv4.dev_addr_genid);
68077+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
68078 rt_cache_flush(dev_net(dev), -1);
68079 break;
68080 case NETDEV_DOWN:
68081diff -urNp linux-3.0.4/net/ipv4/fib_semantics.c linux-3.0.4/net/ipv4/fib_semantics.c
68082--- linux-3.0.4/net/ipv4/fib_semantics.c 2011-07-21 22:17:23.000000000 -0400
68083+++ linux-3.0.4/net/ipv4/fib_semantics.c 2011-08-23 21:47:56.000000000 -0400
68084@@ -691,7 +691,7 @@ __be32 fib_info_update_nh_saddr(struct n
68085 nh->nh_saddr = inet_select_addr(nh->nh_dev,
68086 nh->nh_gw,
68087 nh->nh_parent->fib_scope);
68088- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
68089+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
68090
68091 return nh->nh_saddr;
68092 }
68093diff -urNp linux-3.0.4/net/ipv4/inet_diag.c linux-3.0.4/net/ipv4/inet_diag.c
68094--- linux-3.0.4/net/ipv4/inet_diag.c 2011-07-21 22:17:23.000000000 -0400
68095+++ linux-3.0.4/net/ipv4/inet_diag.c 2011-08-23 21:48:14.000000000 -0400
68096@@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct soc
68097 r->idiag_retrans = 0;
68098
68099 r->id.idiag_if = sk->sk_bound_dev_if;
68100+
68101+#ifdef CONFIG_GRKERNSEC_HIDESYM
68102+ r->id.idiag_cookie[0] = 0;
68103+ r->id.idiag_cookie[1] = 0;
68104+#else
68105 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
68106 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
68107+#endif
68108
68109 r->id.idiag_sport = inet->inet_sport;
68110 r->id.idiag_dport = inet->inet_dport;
68111@@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct in
68112 r->idiag_family = tw->tw_family;
68113 r->idiag_retrans = 0;
68114 r->id.idiag_if = tw->tw_bound_dev_if;
68115+
68116+#ifdef CONFIG_GRKERNSEC_HIDESYM
68117+ r->id.idiag_cookie[0] = 0;
68118+ r->id.idiag_cookie[1] = 0;
68119+#else
68120 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
68121 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
68122+#endif
68123+
68124 r->id.idiag_sport = tw->tw_sport;
68125 r->id.idiag_dport = tw->tw_dport;
68126 r->id.idiag_src[0] = tw->tw_rcv_saddr;
68127@@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk
68128 if (sk == NULL)
68129 goto unlock;
68130
68131+#ifndef CONFIG_GRKERNSEC_HIDESYM
68132 err = -ESTALE;
68133 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
68134 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
68135 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
68136 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
68137 goto out;
68138+#endif
68139
68140 err = -ENOMEM;
68141 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
68142@@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
68143 r->idiag_retrans = req->retrans;
68144
68145 r->id.idiag_if = sk->sk_bound_dev_if;
68146+
68147+#ifdef CONFIG_GRKERNSEC_HIDESYM
68148+ r->id.idiag_cookie[0] = 0;
68149+ r->id.idiag_cookie[1] = 0;
68150+#else
68151 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
68152 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
68153+#endif
68154
68155 tmo = req->expires - jiffies;
68156 if (tmo < 0)
68157diff -urNp linux-3.0.4/net/ipv4/inet_hashtables.c linux-3.0.4/net/ipv4/inet_hashtables.c
68158--- linux-3.0.4/net/ipv4/inet_hashtables.c 2011-08-23 21:44:40.000000000 -0400
68159+++ linux-3.0.4/net/ipv4/inet_hashtables.c 2011-08-23 21:55:24.000000000 -0400
68160@@ -18,12 +18,15 @@
68161 #include <linux/sched.h>
68162 #include <linux/slab.h>
68163 #include <linux/wait.h>
68164+#include <linux/security.h>
68165
68166 #include <net/inet_connection_sock.h>
68167 #include <net/inet_hashtables.h>
68168 #include <net/secure_seq.h>
68169 #include <net/ip.h>
68170
68171+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
68172+
68173 /*
68174 * Allocate and initialize a new local port bind bucket.
68175 * The bindhash mutex for snum's hash chain must be held here.
68176@@ -530,6 +533,8 @@ ok:
68177 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
68178 spin_unlock(&head->lock);
68179
68180+ gr_update_task_in_ip_table(current, inet_sk(sk));
68181+
68182 if (tw) {
68183 inet_twsk_deschedule(tw, death_row);
68184 while (twrefcnt) {
68185diff -urNp linux-3.0.4/net/ipv4/inetpeer.c linux-3.0.4/net/ipv4/inetpeer.c
68186--- linux-3.0.4/net/ipv4/inetpeer.c 2011-08-23 21:44:40.000000000 -0400
68187+++ linux-3.0.4/net/ipv4/inetpeer.c 2011-08-23 21:48:14.000000000 -0400
68188@@ -481,6 +481,8 @@ struct inet_peer *inet_getpeer(struct in
68189 unsigned int sequence;
68190 int invalidated, newrefcnt = 0;
68191
68192+ pax_track_stack();
68193+
68194 /* Look up for the address quickly, lockless.
68195 * Because of a concurrent writer, we might not find an existing entry.
68196 */
68197@@ -517,8 +519,8 @@ found: /* The existing node has been fo
68198 if (p) {
68199 p->daddr = *daddr;
68200 atomic_set(&p->refcnt, 1);
68201- atomic_set(&p->rid, 0);
68202- atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
68203+ atomic_set_unchecked(&p->rid, 0);
68204+ atomic_set_unchecked(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
68205 p->tcp_ts_stamp = 0;
68206 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
68207 p->rate_tokens = 0;
68208diff -urNp linux-3.0.4/net/ipv4/ip_fragment.c linux-3.0.4/net/ipv4/ip_fragment.c
68209--- linux-3.0.4/net/ipv4/ip_fragment.c 2011-07-21 22:17:23.000000000 -0400
68210+++ linux-3.0.4/net/ipv4/ip_fragment.c 2011-08-23 21:47:56.000000000 -0400
68211@@ -315,7 +315,7 @@ static inline int ip_frag_too_far(struct
68212 return 0;
68213
68214 start = qp->rid;
68215- end = atomic_inc_return(&peer->rid);
68216+ end = atomic_inc_return_unchecked(&peer->rid);
68217 qp->rid = end;
68218
68219 rc = qp->q.fragments && (end - start) > max;
68220diff -urNp linux-3.0.4/net/ipv4/ip_sockglue.c linux-3.0.4/net/ipv4/ip_sockglue.c
68221--- linux-3.0.4/net/ipv4/ip_sockglue.c 2011-07-21 22:17:23.000000000 -0400
68222+++ linux-3.0.4/net/ipv4/ip_sockglue.c 2011-08-23 21:48:14.000000000 -0400
68223@@ -1073,6 +1073,8 @@ static int do_ip_getsockopt(struct sock
68224 int val;
68225 int len;
68226
68227+ pax_track_stack();
68228+
68229 if (level != SOL_IP)
68230 return -EOPNOTSUPP;
68231
68232@@ -1110,7 +1112,8 @@ static int do_ip_getsockopt(struct sock
68233 len = min_t(unsigned int, len, opt->optlen);
68234 if (put_user(len, optlen))
68235 return -EFAULT;
68236- if (copy_to_user(optval, opt->__data, len))
68237+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
68238+ copy_to_user(optval, opt->__data, len))
68239 return -EFAULT;
68240 return 0;
68241 }
68242diff -urNp linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c
68243--- linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-07-21 22:17:23.000000000 -0400
68244+++ linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-08-23 21:47:56.000000000 -0400
68245@@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(
68246
68247 *len = 0;
68248
68249- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
68250+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
68251 if (*octets == NULL) {
68252 if (net_ratelimit())
68253 pr_notice("OOM in bsalg (%d)\n", __LINE__);
68254diff -urNp linux-3.0.4/net/ipv4/ping.c linux-3.0.4/net/ipv4/ping.c
68255--- linux-3.0.4/net/ipv4/ping.c 2011-07-21 22:17:23.000000000 -0400
68256+++ linux-3.0.4/net/ipv4/ping.c 2011-08-23 21:47:56.000000000 -0400
68257@@ -837,7 +837,7 @@ static void ping_format_sock(struct sock
68258 sk_rmem_alloc_get(sp),
68259 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
68260 atomic_read(&sp->sk_refcnt), sp,
68261- atomic_read(&sp->sk_drops), len);
68262+ atomic_read_unchecked(&sp->sk_drops), len);
68263 }
68264
68265 static int ping_seq_show(struct seq_file *seq, void *v)
68266diff -urNp linux-3.0.4/net/ipv4/raw.c linux-3.0.4/net/ipv4/raw.c
68267--- linux-3.0.4/net/ipv4/raw.c 2011-07-21 22:17:23.000000000 -0400
68268+++ linux-3.0.4/net/ipv4/raw.c 2011-08-23 21:48:14.000000000 -0400
68269@@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk,
68270 int raw_rcv(struct sock *sk, struct sk_buff *skb)
68271 {
68272 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
68273- atomic_inc(&sk->sk_drops);
68274+ atomic_inc_unchecked(&sk->sk_drops);
68275 kfree_skb(skb);
68276 return NET_RX_DROP;
68277 }
68278@@ -736,16 +736,20 @@ static int raw_init(struct sock *sk)
68279
68280 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
68281 {
68282+ struct icmp_filter filter;
68283+
68284 if (optlen > sizeof(struct icmp_filter))
68285 optlen = sizeof(struct icmp_filter);
68286- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
68287+ if (copy_from_user(&filter, optval, optlen))
68288 return -EFAULT;
68289+ raw_sk(sk)->filter = filter;
68290 return 0;
68291 }
68292
68293 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
68294 {
68295 int len, ret = -EFAULT;
68296+ struct icmp_filter filter;
68297
68298 if (get_user(len, optlen))
68299 goto out;
68300@@ -755,8 +759,9 @@ static int raw_geticmpfilter(struct sock
68301 if (len > sizeof(struct icmp_filter))
68302 len = sizeof(struct icmp_filter);
68303 ret = -EFAULT;
68304- if (put_user(len, optlen) ||
68305- copy_to_user(optval, &raw_sk(sk)->filter, len))
68306+ filter = raw_sk(sk)->filter;
68307+ if (put_user(len, optlen) || len > sizeof filter ||
68308+ copy_to_user(optval, &filter, len))
68309 goto out;
68310 ret = 0;
68311 out: return ret;
68312@@ -984,7 +989,13 @@ static void raw_sock_seq_show(struct seq
68313 sk_wmem_alloc_get(sp),
68314 sk_rmem_alloc_get(sp),
68315 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
68316- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
68317+ atomic_read(&sp->sk_refcnt),
68318+#ifdef CONFIG_GRKERNSEC_HIDESYM
68319+ NULL,
68320+#else
68321+ sp,
68322+#endif
68323+ atomic_read_unchecked(&sp->sk_drops));
68324 }
68325
68326 static int raw_seq_show(struct seq_file *seq, void *v)
68327diff -urNp linux-3.0.4/net/ipv4/route.c linux-3.0.4/net/ipv4/route.c
68328--- linux-3.0.4/net/ipv4/route.c 2011-08-23 21:44:40.000000000 -0400
68329+++ linux-3.0.4/net/ipv4/route.c 2011-08-23 21:47:56.000000000 -0400
68330@@ -304,7 +304,7 @@ static inline unsigned int rt_hash(__be3
68331
68332 static inline int rt_genid(struct net *net)
68333 {
68334- return atomic_read(&net->ipv4.rt_genid);
68335+ return atomic_read_unchecked(&net->ipv4.rt_genid);
68336 }
68337
68338 #ifdef CONFIG_PROC_FS
68339@@ -833,7 +833,7 @@ static void rt_cache_invalidate(struct n
68340 unsigned char shuffle;
68341
68342 get_random_bytes(&shuffle, sizeof(shuffle));
68343- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
68344+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
68345 }
68346
68347 /*
68348@@ -2834,7 +2834,7 @@ static int rt_fill_info(struct net *net,
68349 error = rt->dst.error;
68350 if (peer) {
68351 inet_peer_refcheck(rt->peer);
68352- id = atomic_read(&peer->ip_id_count) & 0xffff;
68353+ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
68354 if (peer->tcp_ts_stamp) {
68355 ts = peer->tcp_ts;
68356 tsage = get_seconds() - peer->tcp_ts_stamp;
68357diff -urNp linux-3.0.4/net/ipv4/tcp.c linux-3.0.4/net/ipv4/tcp.c
68358--- linux-3.0.4/net/ipv4/tcp.c 2011-07-21 22:17:23.000000000 -0400
68359+++ linux-3.0.4/net/ipv4/tcp.c 2011-08-23 21:48:14.000000000 -0400
68360@@ -2122,6 +2122,8 @@ static int do_tcp_setsockopt(struct sock
68361 int val;
68362 int err = 0;
68363
68364+ pax_track_stack();
68365+
68366 /* These are data/string values, all the others are ints */
68367 switch (optname) {
68368 case TCP_CONGESTION: {
68369@@ -2501,6 +2503,8 @@ static int do_tcp_getsockopt(struct sock
68370 struct tcp_sock *tp = tcp_sk(sk);
68371 int val, len;
68372
68373+ pax_track_stack();
68374+
68375 if (get_user(len, optlen))
68376 return -EFAULT;
68377
68378diff -urNp linux-3.0.4/net/ipv4/tcp_ipv4.c linux-3.0.4/net/ipv4/tcp_ipv4.c
68379--- linux-3.0.4/net/ipv4/tcp_ipv4.c 2011-08-23 21:44:40.000000000 -0400
68380+++ linux-3.0.4/net/ipv4/tcp_ipv4.c 2011-08-23 21:48:14.000000000 -0400
68381@@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
68382 int sysctl_tcp_low_latency __read_mostly;
68383 EXPORT_SYMBOL(sysctl_tcp_low_latency);
68384
68385+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68386+extern int grsec_enable_blackhole;
68387+#endif
68388
68389 #ifdef CONFIG_TCP_MD5SIG
68390 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
68391@@ -1607,6 +1610,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
68392 return 0;
68393
68394 reset:
68395+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68396+ if (!grsec_enable_blackhole)
68397+#endif
68398 tcp_v4_send_reset(rsk, skb);
68399 discard:
68400 kfree_skb(skb);
68401@@ -1669,12 +1675,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
68402 TCP_SKB_CB(skb)->sacked = 0;
68403
68404 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
68405- if (!sk)
68406+ if (!sk) {
68407+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68408+ ret = 1;
68409+#endif
68410 goto no_tcp_socket;
68411-
68412+ }
68413 process:
68414- if (sk->sk_state == TCP_TIME_WAIT)
68415+ if (sk->sk_state == TCP_TIME_WAIT) {
68416+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68417+ ret = 2;
68418+#endif
68419 goto do_time_wait;
68420+ }
68421
68422 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
68423 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
68424@@ -1724,6 +1737,10 @@ no_tcp_socket:
68425 bad_packet:
68426 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
68427 } else {
68428+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68429+ if (!grsec_enable_blackhole || (ret == 1 &&
68430+ (skb->dev->flags & IFF_LOOPBACK)))
68431+#endif
68432 tcp_v4_send_reset(NULL, skb);
68433 }
68434
68435@@ -2388,7 +2405,11 @@ static void get_openreq4(struct sock *sk
68436 0, /* non standard timer */
68437 0, /* open_requests have no inode */
68438 atomic_read(&sk->sk_refcnt),
68439+#ifdef CONFIG_GRKERNSEC_HIDESYM
68440+ NULL,
68441+#else
68442 req,
68443+#endif
68444 len);
68445 }
68446
68447@@ -2438,7 +2459,12 @@ static void get_tcp4_sock(struct sock *s
68448 sock_i_uid(sk),
68449 icsk->icsk_probes_out,
68450 sock_i_ino(sk),
68451- atomic_read(&sk->sk_refcnt), sk,
68452+ atomic_read(&sk->sk_refcnt),
68453+#ifdef CONFIG_GRKERNSEC_HIDESYM
68454+ NULL,
68455+#else
68456+ sk,
68457+#endif
68458 jiffies_to_clock_t(icsk->icsk_rto),
68459 jiffies_to_clock_t(icsk->icsk_ack.ato),
68460 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
68461@@ -2466,7 +2492,13 @@ static void get_timewait4_sock(struct in
68462 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
68463 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
68464 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
68465- atomic_read(&tw->tw_refcnt), tw, len);
68466+ atomic_read(&tw->tw_refcnt),
68467+#ifdef CONFIG_GRKERNSEC_HIDESYM
68468+ NULL,
68469+#else
68470+ tw,
68471+#endif
68472+ len);
68473 }
68474
68475 #define TMPSZ 150
68476diff -urNp linux-3.0.4/net/ipv4/tcp_minisocks.c linux-3.0.4/net/ipv4/tcp_minisocks.c
68477--- linux-3.0.4/net/ipv4/tcp_minisocks.c 2011-07-21 22:17:23.000000000 -0400
68478+++ linux-3.0.4/net/ipv4/tcp_minisocks.c 2011-08-23 21:48:14.000000000 -0400
68479@@ -27,6 +27,10 @@
68480 #include <net/inet_common.h>
68481 #include <net/xfrm.h>
68482
68483+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68484+extern int grsec_enable_blackhole;
68485+#endif
68486+
68487 int sysctl_tcp_syncookies __read_mostly = 1;
68488 EXPORT_SYMBOL(sysctl_tcp_syncookies);
68489
68490@@ -745,6 +749,10 @@ listen_overflow:
68491
68492 embryonic_reset:
68493 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
68494+
68495+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68496+ if (!grsec_enable_blackhole)
68497+#endif
68498 if (!(flg & TCP_FLAG_RST))
68499 req->rsk_ops->send_reset(sk, skb);
68500
68501diff -urNp linux-3.0.4/net/ipv4/tcp_output.c linux-3.0.4/net/ipv4/tcp_output.c
68502--- linux-3.0.4/net/ipv4/tcp_output.c 2011-07-21 22:17:23.000000000 -0400
68503+++ linux-3.0.4/net/ipv4/tcp_output.c 2011-08-23 21:48:14.000000000 -0400
68504@@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct s
68505 int mss;
68506 int s_data_desired = 0;
68507
68508+ pax_track_stack();
68509+
68510 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
68511 s_data_desired = cvp->s_data_desired;
68512 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
68513diff -urNp linux-3.0.4/net/ipv4/tcp_probe.c linux-3.0.4/net/ipv4/tcp_probe.c
68514--- linux-3.0.4/net/ipv4/tcp_probe.c 2011-07-21 22:17:23.000000000 -0400
68515+++ linux-3.0.4/net/ipv4/tcp_probe.c 2011-08-23 21:47:56.000000000 -0400
68516@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file
68517 if (cnt + width >= len)
68518 break;
68519
68520- if (copy_to_user(buf + cnt, tbuf, width))
68521+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
68522 return -EFAULT;
68523 cnt += width;
68524 }
68525diff -urNp linux-3.0.4/net/ipv4/tcp_timer.c linux-3.0.4/net/ipv4/tcp_timer.c
68526--- linux-3.0.4/net/ipv4/tcp_timer.c 2011-07-21 22:17:23.000000000 -0400
68527+++ linux-3.0.4/net/ipv4/tcp_timer.c 2011-08-23 21:48:14.000000000 -0400
68528@@ -22,6 +22,10 @@
68529 #include <linux/gfp.h>
68530 #include <net/tcp.h>
68531
68532+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68533+extern int grsec_lastack_retries;
68534+#endif
68535+
68536 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
68537 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
68538 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
68539@@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock
68540 }
68541 }
68542
68543+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68544+ if ((sk->sk_state == TCP_LAST_ACK) &&
68545+ (grsec_lastack_retries > 0) &&
68546+ (grsec_lastack_retries < retry_until))
68547+ retry_until = grsec_lastack_retries;
68548+#endif
68549+
68550 if (retransmits_timed_out(sk, retry_until,
68551 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
68552 /* Has it gone just too far? */
68553diff -urNp linux-3.0.4/net/ipv4/udp.c linux-3.0.4/net/ipv4/udp.c
68554--- linux-3.0.4/net/ipv4/udp.c 2011-07-21 22:17:23.000000000 -0400
68555+++ linux-3.0.4/net/ipv4/udp.c 2011-08-23 21:48:14.000000000 -0400
68556@@ -86,6 +86,7 @@
68557 #include <linux/types.h>
68558 #include <linux/fcntl.h>
68559 #include <linux/module.h>
68560+#include <linux/security.h>
68561 #include <linux/socket.h>
68562 #include <linux/sockios.h>
68563 #include <linux/igmp.h>
68564@@ -107,6 +108,10 @@
68565 #include <net/xfrm.h>
68566 #include "udp_impl.h"
68567
68568+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68569+extern int grsec_enable_blackhole;
68570+#endif
68571+
68572 struct udp_table udp_table __read_mostly;
68573 EXPORT_SYMBOL(udp_table);
68574
68575@@ -564,6 +569,9 @@ found:
68576 return s;
68577 }
68578
68579+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
68580+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
68581+
68582 /*
68583 * This routine is called by the ICMP module when it gets some
68584 * sort of error condition. If err < 0 then the socket should
68585@@ -855,9 +863,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
68586 dport = usin->sin_port;
68587 if (dport == 0)
68588 return -EINVAL;
68589+
68590+ err = gr_search_udp_sendmsg(sk, usin);
68591+ if (err)
68592+ return err;
68593 } else {
68594 if (sk->sk_state != TCP_ESTABLISHED)
68595 return -EDESTADDRREQ;
68596+
68597+ err = gr_search_udp_sendmsg(sk, NULL);
68598+ if (err)
68599+ return err;
68600+
68601 daddr = inet->inet_daddr;
68602 dport = inet->inet_dport;
68603 /* Open fast path for connected socket.
68604@@ -1098,7 +1115,7 @@ static unsigned int first_packet_length(
68605 udp_lib_checksum_complete(skb)) {
68606 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
68607 IS_UDPLITE(sk));
68608- atomic_inc(&sk->sk_drops);
68609+ atomic_inc_unchecked(&sk->sk_drops);
68610 __skb_unlink(skb, rcvq);
68611 __skb_queue_tail(&list_kill, skb);
68612 }
68613@@ -1184,6 +1201,10 @@ try_again:
68614 if (!skb)
68615 goto out;
68616
68617+ err = gr_search_udp_recvmsg(sk, skb);
68618+ if (err)
68619+ goto out_free;
68620+
68621 ulen = skb->len - sizeof(struct udphdr);
68622 if (len > ulen)
68623 len = ulen;
68624@@ -1483,7 +1504,7 @@ int udp_queue_rcv_skb(struct sock *sk, s
68625
68626 drop:
68627 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
68628- atomic_inc(&sk->sk_drops);
68629+ atomic_inc_unchecked(&sk->sk_drops);
68630 kfree_skb(skb);
68631 return -1;
68632 }
68633@@ -1502,7 +1523,7 @@ static void flush_stack(struct sock **st
68634 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
68635
68636 if (!skb1) {
68637- atomic_inc(&sk->sk_drops);
68638+ atomic_inc_unchecked(&sk->sk_drops);
68639 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
68640 IS_UDPLITE(sk));
68641 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
68642@@ -1671,6 +1692,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
68643 goto csum_error;
68644
68645 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
68646+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68647+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
68648+#endif
68649 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
68650
68651 /*
68652@@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock
68653 sk_wmem_alloc_get(sp),
68654 sk_rmem_alloc_get(sp),
68655 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
68656- atomic_read(&sp->sk_refcnt), sp,
68657- atomic_read(&sp->sk_drops), len);
68658+ atomic_read(&sp->sk_refcnt),
68659+#ifdef CONFIG_GRKERNSEC_HIDESYM
68660+ NULL,
68661+#else
68662+ sp,
68663+#endif
68664+ atomic_read_unchecked(&sp->sk_drops), len);
68665 }
68666
68667 int udp4_seq_show(struct seq_file *seq, void *v)
68668diff -urNp linux-3.0.4/net/ipv6/inet6_connection_sock.c linux-3.0.4/net/ipv6/inet6_connection_sock.c
68669--- linux-3.0.4/net/ipv6/inet6_connection_sock.c 2011-07-21 22:17:23.000000000 -0400
68670+++ linux-3.0.4/net/ipv6/inet6_connection_sock.c 2011-08-23 21:47:56.000000000 -0400
68671@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *
68672 #ifdef CONFIG_XFRM
68673 {
68674 struct rt6_info *rt = (struct rt6_info *)dst;
68675- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
68676+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
68677 }
68678 #endif
68679 }
68680@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(
68681 #ifdef CONFIG_XFRM
68682 if (dst) {
68683 struct rt6_info *rt = (struct rt6_info *)dst;
68684- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
68685+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
68686 __sk_dst_reset(sk);
68687 dst = NULL;
68688 }
68689diff -urNp linux-3.0.4/net/ipv6/ipv6_sockglue.c linux-3.0.4/net/ipv6/ipv6_sockglue.c
68690--- linux-3.0.4/net/ipv6/ipv6_sockglue.c 2011-07-21 22:17:23.000000000 -0400
68691+++ linux-3.0.4/net/ipv6/ipv6_sockglue.c 2011-08-23 21:48:14.000000000 -0400
68692@@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct soc
68693 int val, valbool;
68694 int retv = -ENOPROTOOPT;
68695
68696+ pax_track_stack();
68697+
68698 if (optval == NULL)
68699 val=0;
68700 else {
68701@@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct soc
68702 int len;
68703 int val;
68704
68705+ pax_track_stack();
68706+
68707 if (ip6_mroute_opt(optname))
68708 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
68709
68710diff -urNp linux-3.0.4/net/ipv6/raw.c linux-3.0.4/net/ipv6/raw.c
68711--- linux-3.0.4/net/ipv6/raw.c 2011-07-21 22:17:23.000000000 -0400
68712+++ linux-3.0.4/net/ipv6/raw.c 2011-08-23 21:48:14.000000000 -0400
68713@@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct s
68714 {
68715 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
68716 skb_checksum_complete(skb)) {
68717- atomic_inc(&sk->sk_drops);
68718+ atomic_inc_unchecked(&sk->sk_drops);
68719 kfree_skb(skb);
68720 return NET_RX_DROP;
68721 }
68722@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
68723 struct raw6_sock *rp = raw6_sk(sk);
68724
68725 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
68726- atomic_inc(&sk->sk_drops);
68727+ atomic_inc_unchecked(&sk->sk_drops);
68728 kfree_skb(skb);
68729 return NET_RX_DROP;
68730 }
68731@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
68732
68733 if (inet->hdrincl) {
68734 if (skb_checksum_complete(skb)) {
68735- atomic_inc(&sk->sk_drops);
68736+ atomic_inc_unchecked(&sk->sk_drops);
68737 kfree_skb(skb);
68738 return NET_RX_DROP;
68739 }
68740@@ -601,7 +601,7 @@ out:
68741 return err;
68742 }
68743
68744-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
68745+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
68746 struct flowi6 *fl6, struct dst_entry **dstp,
68747 unsigned int flags)
68748 {
68749@@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *i
68750 u16 proto;
68751 int err;
68752
68753+ pax_track_stack();
68754+
68755 /* Rough check on arithmetic overflow,
68756 better check is made in ip6_append_data().
68757 */
68758@@ -909,12 +911,15 @@ do_confirm:
68759 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
68760 char __user *optval, int optlen)
68761 {
68762+ struct icmp6_filter filter;
68763+
68764 switch (optname) {
68765 case ICMPV6_FILTER:
68766 if (optlen > sizeof(struct icmp6_filter))
68767 optlen = sizeof(struct icmp6_filter);
68768- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
68769+ if (copy_from_user(&filter, optval, optlen))
68770 return -EFAULT;
68771+ raw6_sk(sk)->filter = filter;
68772 return 0;
68773 default:
68774 return -ENOPROTOOPT;
68775@@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct so
68776 char __user *optval, int __user *optlen)
68777 {
68778 int len;
68779+ struct icmp6_filter filter;
68780
68781 switch (optname) {
68782 case ICMPV6_FILTER:
68783@@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct so
68784 len = sizeof(struct icmp6_filter);
68785 if (put_user(len, optlen))
68786 return -EFAULT;
68787- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
68788+ filter = raw6_sk(sk)->filter;
68789+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
68790 return -EFAULT;
68791 return 0;
68792 default:
68793@@ -1252,7 +1259,13 @@ static void raw6_sock_seq_show(struct se
68794 0, 0L, 0,
68795 sock_i_uid(sp), 0,
68796 sock_i_ino(sp),
68797- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
68798+ atomic_read(&sp->sk_refcnt),
68799+#ifdef CONFIG_GRKERNSEC_HIDESYM
68800+ NULL,
68801+#else
68802+ sp,
68803+#endif
68804+ atomic_read_unchecked(&sp->sk_drops));
68805 }
68806
68807 static int raw6_seq_show(struct seq_file *seq, void *v)
68808diff -urNp linux-3.0.4/net/ipv6/tcp_ipv6.c linux-3.0.4/net/ipv6/tcp_ipv6.c
68809--- linux-3.0.4/net/ipv6/tcp_ipv6.c 2011-08-23 21:44:40.000000000 -0400
68810+++ linux-3.0.4/net/ipv6/tcp_ipv6.c 2011-08-23 21:48:14.000000000 -0400
68811@@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
68812 }
68813 #endif
68814
68815+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68816+extern int grsec_enable_blackhole;
68817+#endif
68818+
68819 static void tcp_v6_hash(struct sock *sk)
68820 {
68821 if (sk->sk_state != TCP_CLOSE) {
68822@@ -1662,6 +1666,9 @@ static int tcp_v6_do_rcv(struct sock *sk
68823 return 0;
68824
68825 reset:
68826+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68827+ if (!grsec_enable_blackhole)
68828+#endif
68829 tcp_v6_send_reset(sk, skb);
68830 discard:
68831 if (opt_skb)
68832@@ -1741,12 +1748,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
68833 TCP_SKB_CB(skb)->sacked = 0;
68834
68835 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
68836- if (!sk)
68837+ if (!sk) {
68838+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68839+ ret = 1;
68840+#endif
68841 goto no_tcp_socket;
68842+ }
68843
68844 process:
68845- if (sk->sk_state == TCP_TIME_WAIT)
68846+ if (sk->sk_state == TCP_TIME_WAIT) {
68847+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68848+ ret = 2;
68849+#endif
68850 goto do_time_wait;
68851+ }
68852
68853 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
68854 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
68855@@ -1794,6 +1809,10 @@ no_tcp_socket:
68856 bad_packet:
68857 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
68858 } else {
68859+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68860+ if (!grsec_enable_blackhole || (ret == 1 &&
68861+ (skb->dev->flags & IFF_LOOPBACK)))
68862+#endif
68863 tcp_v6_send_reset(NULL, skb);
68864 }
68865
68866@@ -2054,7 +2073,13 @@ static void get_openreq6(struct seq_file
68867 uid,
68868 0, /* non standard timer */
68869 0, /* open_requests have no inode */
68870- 0, req);
68871+ 0,
68872+#ifdef CONFIG_GRKERNSEC_HIDESYM
68873+ NULL
68874+#else
68875+ req
68876+#endif
68877+ );
68878 }
68879
68880 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
68881@@ -2104,7 +2129,12 @@ static void get_tcp6_sock(struct seq_fil
68882 sock_i_uid(sp),
68883 icsk->icsk_probes_out,
68884 sock_i_ino(sp),
68885- atomic_read(&sp->sk_refcnt), sp,
68886+ atomic_read(&sp->sk_refcnt),
68887+#ifdef CONFIG_GRKERNSEC_HIDESYM
68888+ NULL,
68889+#else
68890+ sp,
68891+#endif
68892 jiffies_to_clock_t(icsk->icsk_rto),
68893 jiffies_to_clock_t(icsk->icsk_ack.ato),
68894 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
68895@@ -2139,7 +2169,13 @@ static void get_timewait6_sock(struct se
68896 dest->s6_addr32[2], dest->s6_addr32[3], destp,
68897 tw->tw_substate, 0, 0,
68898 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
68899- atomic_read(&tw->tw_refcnt), tw);
68900+ atomic_read(&tw->tw_refcnt),
68901+#ifdef CONFIG_GRKERNSEC_HIDESYM
68902+ NULL
68903+#else
68904+ tw
68905+#endif
68906+ );
68907 }
68908
68909 static int tcp6_seq_show(struct seq_file *seq, void *v)
68910diff -urNp linux-3.0.4/net/ipv6/udp.c linux-3.0.4/net/ipv6/udp.c
68911--- linux-3.0.4/net/ipv6/udp.c 2011-08-23 21:44:40.000000000 -0400
68912+++ linux-3.0.4/net/ipv6/udp.c 2011-08-23 21:48:14.000000000 -0400
68913@@ -50,6 +50,10 @@
68914 #include <linux/seq_file.h>
68915 #include "udp_impl.h"
68916
68917+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68918+extern int grsec_enable_blackhole;
68919+#endif
68920+
68921 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
68922 {
68923 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
68924@@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
68925
68926 return 0;
68927 drop:
68928- atomic_inc(&sk->sk_drops);
68929+ atomic_inc_unchecked(&sk->sk_drops);
68930 drop_no_sk_drops_inc:
68931 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
68932 kfree_skb(skb);
68933@@ -624,7 +628,7 @@ static void flush_stack(struct sock **st
68934 continue;
68935 }
68936 drop:
68937- atomic_inc(&sk->sk_drops);
68938+ atomic_inc_unchecked(&sk->sk_drops);
68939 UDP6_INC_STATS_BH(sock_net(sk),
68940 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
68941 UDP6_INC_STATS_BH(sock_net(sk),
68942@@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
68943 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
68944 proto == IPPROTO_UDPLITE);
68945
68946+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68947+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
68948+#endif
68949 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
68950
68951 kfree_skb(skb);
68952@@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb,
68953 if (!sock_owned_by_user(sk))
68954 udpv6_queue_rcv_skb(sk, skb);
68955 else if (sk_add_backlog(sk, skb)) {
68956- atomic_inc(&sk->sk_drops);
68957+ atomic_inc_unchecked(&sk->sk_drops);
68958 bh_unlock_sock(sk);
68959 sock_put(sk);
68960 goto discard;
68961@@ -1406,8 +1413,13 @@ static void udp6_sock_seq_show(struct se
68962 0, 0L, 0,
68963 sock_i_uid(sp), 0,
68964 sock_i_ino(sp),
68965- atomic_read(&sp->sk_refcnt), sp,
68966- atomic_read(&sp->sk_drops));
68967+ atomic_read(&sp->sk_refcnt),
68968+#ifdef CONFIG_GRKERNSEC_HIDESYM
68969+ NULL,
68970+#else
68971+ sp,
68972+#endif
68973+ atomic_read_unchecked(&sp->sk_drops));
68974 }
68975
68976 int udp6_seq_show(struct seq_file *seq, void *v)
68977diff -urNp linux-3.0.4/net/irda/ircomm/ircomm_tty.c linux-3.0.4/net/irda/ircomm/ircomm_tty.c
68978--- linux-3.0.4/net/irda/ircomm/ircomm_tty.c 2011-07-21 22:17:23.000000000 -0400
68979+++ linux-3.0.4/net/irda/ircomm/ircomm_tty.c 2011-08-23 21:47:56.000000000 -0400
68980@@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(st
68981 add_wait_queue(&self->open_wait, &wait);
68982
68983 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
68984- __FILE__,__LINE__, tty->driver->name, self->open_count );
68985+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
68986
68987 /* As far as I can see, we protect open_count - Jean II */
68988 spin_lock_irqsave(&self->spinlock, flags);
68989 if (!tty_hung_up_p(filp)) {
68990 extra_count = 1;
68991- self->open_count--;
68992+ local_dec(&self->open_count);
68993 }
68994 spin_unlock_irqrestore(&self->spinlock, flags);
68995- self->blocked_open++;
68996+ local_inc(&self->blocked_open);
68997
68998 while (1) {
68999 if (tty->termios->c_cflag & CBAUD) {
69000@@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(st
69001 }
69002
69003 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
69004- __FILE__,__LINE__, tty->driver->name, self->open_count );
69005+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
69006
69007 schedule();
69008 }
69009@@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(st
69010 if (extra_count) {
69011 /* ++ is not atomic, so this should be protected - Jean II */
69012 spin_lock_irqsave(&self->spinlock, flags);
69013- self->open_count++;
69014+ local_inc(&self->open_count);
69015 spin_unlock_irqrestore(&self->spinlock, flags);
69016 }
69017- self->blocked_open--;
69018+ local_dec(&self->blocked_open);
69019
69020 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
69021- __FILE__,__LINE__, tty->driver->name, self->open_count);
69022+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
69023
69024 if (!retval)
69025 self->flags |= ASYNC_NORMAL_ACTIVE;
69026@@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_st
69027 }
69028 /* ++ is not atomic, so this should be protected - Jean II */
69029 spin_lock_irqsave(&self->spinlock, flags);
69030- self->open_count++;
69031+ local_inc(&self->open_count);
69032
69033 tty->driver_data = self;
69034 self->tty = tty;
69035 spin_unlock_irqrestore(&self->spinlock, flags);
69036
69037 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
69038- self->line, self->open_count);
69039+ self->line, local_read(&self->open_count));
69040
69041 /* Not really used by us, but lets do it anyway */
69042 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
69043@@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_
69044 return;
69045 }
69046
69047- if ((tty->count == 1) && (self->open_count != 1)) {
69048+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
69049 /*
69050 * Uh, oh. tty->count is 1, which means that the tty
69051 * structure will be freed. state->count should always
69052@@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_
69053 */
69054 IRDA_DEBUG(0, "%s(), bad serial port count; "
69055 "tty->count is 1, state->count is %d\n", __func__ ,
69056- self->open_count);
69057- self->open_count = 1;
69058+ local_read(&self->open_count));
69059+ local_set(&self->open_count, 1);
69060 }
69061
69062- if (--self->open_count < 0) {
69063+ if (local_dec_return(&self->open_count) < 0) {
69064 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
69065- __func__, self->line, self->open_count);
69066- self->open_count = 0;
69067+ __func__, self->line, local_read(&self->open_count));
69068+ local_set(&self->open_count, 0);
69069 }
69070- if (self->open_count) {
69071+ if (local_read(&self->open_count)) {
69072 spin_unlock_irqrestore(&self->spinlock, flags);
69073
69074 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
69075@@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_
69076 tty->closing = 0;
69077 self->tty = NULL;
69078
69079- if (self->blocked_open) {
69080+ if (local_read(&self->blocked_open)) {
69081 if (self->close_delay)
69082 schedule_timeout_interruptible(self->close_delay);
69083 wake_up_interruptible(&self->open_wait);
69084@@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty
69085 spin_lock_irqsave(&self->spinlock, flags);
69086 self->flags &= ~ASYNC_NORMAL_ACTIVE;
69087 self->tty = NULL;
69088- self->open_count = 0;
69089+ local_set(&self->open_count, 0);
69090 spin_unlock_irqrestore(&self->spinlock, flags);
69091
69092 wake_up_interruptible(&self->open_wait);
69093@@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct
69094 seq_putc(m, '\n');
69095
69096 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
69097- seq_printf(m, "Open count: %d\n", self->open_count);
69098+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
69099 seq_printf(m, "Max data size: %d\n", self->max_data_size);
69100 seq_printf(m, "Max header size: %d\n", self->max_header_size);
69101
69102diff -urNp linux-3.0.4/net/iucv/af_iucv.c linux-3.0.4/net/iucv/af_iucv.c
69103--- linux-3.0.4/net/iucv/af_iucv.c 2011-07-21 22:17:23.000000000 -0400
69104+++ linux-3.0.4/net/iucv/af_iucv.c 2011-08-23 21:47:56.000000000 -0400
69105@@ -648,10 +648,10 @@ static int iucv_sock_autobind(struct soc
69106
69107 write_lock_bh(&iucv_sk_list.lock);
69108
69109- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
69110+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
69111 while (__iucv_get_sock_by_name(name)) {
69112 sprintf(name, "%08x",
69113- atomic_inc_return(&iucv_sk_list.autobind_name));
69114+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
69115 }
69116
69117 write_unlock_bh(&iucv_sk_list.lock);
69118diff -urNp linux-3.0.4/net/key/af_key.c linux-3.0.4/net/key/af_key.c
69119--- linux-3.0.4/net/key/af_key.c 2011-07-21 22:17:23.000000000 -0400
69120+++ linux-3.0.4/net/key/af_key.c 2011-08-23 21:48:14.000000000 -0400
69121@@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk
69122 struct xfrm_migrate m[XFRM_MAX_DEPTH];
69123 struct xfrm_kmaddress k;
69124
69125+ pax_track_stack();
69126+
69127 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
69128 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
69129 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
69130@@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(stru
69131 static u32 get_acqseq(void)
69132 {
69133 u32 res;
69134- static atomic_t acqseq;
69135+ static atomic_unchecked_t acqseq;
69136
69137 do {
69138- res = atomic_inc_return(&acqseq);
69139+ res = atomic_inc_return_unchecked(&acqseq);
69140 } while (!res);
69141 return res;
69142 }
69143diff -urNp linux-3.0.4/net/lapb/lapb_iface.c linux-3.0.4/net/lapb/lapb_iface.c
69144--- linux-3.0.4/net/lapb/lapb_iface.c 2011-07-21 22:17:23.000000000 -0400
69145+++ linux-3.0.4/net/lapb/lapb_iface.c 2011-08-23 21:47:56.000000000 -0400
69146@@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev
69147 goto out;
69148
69149 lapb->dev = dev;
69150- lapb->callbacks = *callbacks;
69151+ lapb->callbacks = callbacks;
69152
69153 __lapb_insert_cb(lapb);
69154
69155@@ -380,32 +380,32 @@ int lapb_data_received(struct net_device
69156
69157 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
69158 {
69159- if (lapb->callbacks.connect_confirmation)
69160- lapb->callbacks.connect_confirmation(lapb->dev, reason);
69161+ if (lapb->callbacks->connect_confirmation)
69162+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
69163 }
69164
69165 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
69166 {
69167- if (lapb->callbacks.connect_indication)
69168- lapb->callbacks.connect_indication(lapb->dev, reason);
69169+ if (lapb->callbacks->connect_indication)
69170+ lapb->callbacks->connect_indication(lapb->dev, reason);
69171 }
69172
69173 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
69174 {
69175- if (lapb->callbacks.disconnect_confirmation)
69176- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
69177+ if (lapb->callbacks->disconnect_confirmation)
69178+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
69179 }
69180
69181 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
69182 {
69183- if (lapb->callbacks.disconnect_indication)
69184- lapb->callbacks.disconnect_indication(lapb->dev, reason);
69185+ if (lapb->callbacks->disconnect_indication)
69186+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
69187 }
69188
69189 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
69190 {
69191- if (lapb->callbacks.data_indication)
69192- return lapb->callbacks.data_indication(lapb->dev, skb);
69193+ if (lapb->callbacks->data_indication)
69194+ return lapb->callbacks->data_indication(lapb->dev, skb);
69195
69196 kfree_skb(skb);
69197 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
69198@@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *l
69199 {
69200 int used = 0;
69201
69202- if (lapb->callbacks.data_transmit) {
69203- lapb->callbacks.data_transmit(lapb->dev, skb);
69204+ if (lapb->callbacks->data_transmit) {
69205+ lapb->callbacks->data_transmit(lapb->dev, skb);
69206 used = 1;
69207 }
69208
69209diff -urNp linux-3.0.4/net/mac80211/debugfs_sta.c linux-3.0.4/net/mac80211/debugfs_sta.c
69210--- linux-3.0.4/net/mac80211/debugfs_sta.c 2011-07-21 22:17:23.000000000 -0400
69211+++ linux-3.0.4/net/mac80211/debugfs_sta.c 2011-08-23 21:48:14.000000000 -0400
69212@@ -140,6 +140,8 @@ static ssize_t sta_agg_status_read(struc
69213 struct tid_ampdu_rx *tid_rx;
69214 struct tid_ampdu_tx *tid_tx;
69215
69216+ pax_track_stack();
69217+
69218 rcu_read_lock();
69219
69220 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
69221@@ -240,6 +242,8 @@ static ssize_t sta_ht_capa_read(struct f
69222 struct sta_info *sta = file->private_data;
69223 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
69224
69225+ pax_track_stack();
69226+
69227 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
69228 htc->ht_supported ? "" : "not ");
69229 if (htc->ht_supported) {
69230diff -urNp linux-3.0.4/net/mac80211/ieee80211_i.h linux-3.0.4/net/mac80211/ieee80211_i.h
69231--- linux-3.0.4/net/mac80211/ieee80211_i.h 2011-07-21 22:17:23.000000000 -0400
69232+++ linux-3.0.4/net/mac80211/ieee80211_i.h 2011-08-23 21:47:56.000000000 -0400
69233@@ -27,6 +27,7 @@
69234 #include <net/ieee80211_radiotap.h>
69235 #include <net/cfg80211.h>
69236 #include <net/mac80211.h>
69237+#include <asm/local.h>
69238 #include "key.h"
69239 #include "sta_info.h"
69240
69241@@ -721,7 +722,7 @@ struct ieee80211_local {
69242 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
69243 spinlock_t queue_stop_reason_lock;
69244
69245- int open_count;
69246+ local_t open_count;
69247 int monitors, cooked_mntrs;
69248 /* number of interfaces with corresponding FIF_ flags */
69249 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
69250diff -urNp linux-3.0.4/net/mac80211/iface.c linux-3.0.4/net/mac80211/iface.c
69251--- linux-3.0.4/net/mac80211/iface.c 2011-08-23 21:44:40.000000000 -0400
69252+++ linux-3.0.4/net/mac80211/iface.c 2011-08-23 21:47:56.000000000 -0400
69253@@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_
69254 break;
69255 }
69256
69257- if (local->open_count == 0) {
69258+ if (local_read(&local->open_count) == 0) {
69259 res = drv_start(local);
69260 if (res)
69261 goto err_del_bss;
69262@@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_
69263 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
69264
69265 if (!is_valid_ether_addr(dev->dev_addr)) {
69266- if (!local->open_count)
69267+ if (!local_read(&local->open_count))
69268 drv_stop(local);
69269 return -EADDRNOTAVAIL;
69270 }
69271@@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_
69272 mutex_unlock(&local->mtx);
69273
69274 if (coming_up)
69275- local->open_count++;
69276+ local_inc(&local->open_count);
69277
69278 if (hw_reconf_flags) {
69279 ieee80211_hw_config(local, hw_reconf_flags);
69280@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_
69281 err_del_interface:
69282 drv_remove_interface(local, &sdata->vif);
69283 err_stop:
69284- if (!local->open_count)
69285+ if (!local_read(&local->open_count))
69286 drv_stop(local);
69287 err_del_bss:
69288 sdata->bss = NULL;
69289@@ -475,7 +475,7 @@ static void ieee80211_do_stop(struct iee
69290 }
69291
69292 if (going_down)
69293- local->open_count--;
69294+ local_dec(&local->open_count);
69295
69296 switch (sdata->vif.type) {
69297 case NL80211_IFTYPE_AP_VLAN:
69298@@ -534,7 +534,7 @@ static void ieee80211_do_stop(struct iee
69299
69300 ieee80211_recalc_ps(local, -1);
69301
69302- if (local->open_count == 0) {
69303+ if (local_read(&local->open_count) == 0) {
69304 if (local->ops->napi_poll)
69305 napi_disable(&local->napi);
69306 ieee80211_clear_tx_pending(local);
69307diff -urNp linux-3.0.4/net/mac80211/main.c linux-3.0.4/net/mac80211/main.c
69308--- linux-3.0.4/net/mac80211/main.c 2011-07-21 22:17:23.000000000 -0400
69309+++ linux-3.0.4/net/mac80211/main.c 2011-08-23 21:47:56.000000000 -0400
69310@@ -209,7 +209,7 @@ int ieee80211_hw_config(struct ieee80211
69311 local->hw.conf.power_level = power;
69312 }
69313
69314- if (changed && local->open_count) {
69315+ if (changed && local_read(&local->open_count)) {
69316 ret = drv_config(local, changed);
69317 /*
69318 * Goal:
69319diff -urNp linux-3.0.4/net/mac80211/mlme.c linux-3.0.4/net/mac80211/mlme.c
69320--- linux-3.0.4/net/mac80211/mlme.c 2011-08-23 21:44:40.000000000 -0400
69321+++ linux-3.0.4/net/mac80211/mlme.c 2011-08-23 21:48:14.000000000 -0400
69322@@ -1444,6 +1444,8 @@ static bool ieee80211_assoc_success(stru
69323 bool have_higher_than_11mbit = false;
69324 u16 ap_ht_cap_flags;
69325
69326+ pax_track_stack();
69327+
69328 /* AssocResp and ReassocResp have identical structure */
69329
69330 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
69331diff -urNp linux-3.0.4/net/mac80211/pm.c linux-3.0.4/net/mac80211/pm.c
69332--- linux-3.0.4/net/mac80211/pm.c 2011-07-21 22:17:23.000000000 -0400
69333+++ linux-3.0.4/net/mac80211/pm.c 2011-08-23 21:47:56.000000000 -0400
69334@@ -47,7 +47,7 @@ int __ieee80211_suspend(struct ieee80211
69335 cancel_work_sync(&local->dynamic_ps_enable_work);
69336 del_timer_sync(&local->dynamic_ps_timer);
69337
69338- local->wowlan = wowlan && local->open_count;
69339+ local->wowlan = wowlan && local_read(&local->open_count);
69340 if (local->wowlan) {
69341 int err = drv_suspend(local, wowlan);
69342 if (err) {
69343@@ -111,7 +111,7 @@ int __ieee80211_suspend(struct ieee80211
69344 }
69345
69346 /* stop hardware - this must stop RX */
69347- if (local->open_count)
69348+ if (local_read(&local->open_count))
69349 ieee80211_stop_device(local);
69350
69351 suspend:
69352diff -urNp linux-3.0.4/net/mac80211/rate.c linux-3.0.4/net/mac80211/rate.c
69353--- linux-3.0.4/net/mac80211/rate.c 2011-07-21 22:17:23.000000000 -0400
69354+++ linux-3.0.4/net/mac80211/rate.c 2011-08-23 21:47:56.000000000 -0400
69355@@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct
69356
69357 ASSERT_RTNL();
69358
69359- if (local->open_count)
69360+ if (local_read(&local->open_count))
69361 return -EBUSY;
69362
69363 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
69364diff -urNp linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c
69365--- linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c 2011-07-21 22:17:23.000000000 -0400
69366+++ linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c 2011-08-23 21:47:56.000000000 -0400
69367@@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r
69368
69369 spin_unlock_irqrestore(&events->lock, status);
69370
69371- if (copy_to_user(buf, pb, p))
69372+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
69373 return -EFAULT;
69374
69375 return p;
69376diff -urNp linux-3.0.4/net/mac80211/util.c linux-3.0.4/net/mac80211/util.c
69377--- linux-3.0.4/net/mac80211/util.c 2011-07-21 22:17:23.000000000 -0400
69378+++ linux-3.0.4/net/mac80211/util.c 2011-08-23 21:47:56.000000000 -0400
69379@@ -1147,7 +1147,7 @@ int ieee80211_reconfig(struct ieee80211_
69380 #endif
69381
69382 /* restart hardware */
69383- if (local->open_count) {
69384+ if (local_read(&local->open_count)) {
69385 /*
69386 * Upon resume hardware can sometimes be goofy due to
69387 * various platform / driver / bus issues, so restarting
69388diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c
69389--- linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c 2011-07-21 22:17:23.000000000 -0400
69390+++ linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c 2011-08-23 21:47:56.000000000 -0400
69391@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
69392 /* Increase the refcnt counter of the dest */
69393 atomic_inc(&dest->refcnt);
69394
69395- conn_flags = atomic_read(&dest->conn_flags);
69396+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
69397 if (cp->protocol != IPPROTO_UDP)
69398 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
69399 /* Bind with the destination and its corresponding transmitter */
69400@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_p
69401 atomic_set(&cp->refcnt, 1);
69402
69403 atomic_set(&cp->n_control, 0);
69404- atomic_set(&cp->in_pkts, 0);
69405+ atomic_set_unchecked(&cp->in_pkts, 0);
69406
69407 atomic_inc(&ipvs->conn_count);
69408 if (flags & IP_VS_CONN_F_NO_CPORT)
69409@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip
69410
69411 /* Don't drop the entry if its number of incoming packets is not
69412 located in [0, 8] */
69413- i = atomic_read(&cp->in_pkts);
69414+ i = atomic_read_unchecked(&cp->in_pkts);
69415 if (i > 8 || i < 0) return 0;
69416
69417 if (!todrop_rate[i]) return 0;
69418diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c
69419--- linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c 2011-07-21 22:17:23.000000000 -0400
69420+++ linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c 2011-08-23 21:47:56.000000000 -0400
69421@@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *sv
69422 ret = cp->packet_xmit(skb, cp, pd->pp);
69423 /* do not touch skb anymore */
69424
69425- atomic_inc(&cp->in_pkts);
69426+ atomic_inc_unchecked(&cp->in_pkts);
69427 ip_vs_conn_put(cp);
69428 return ret;
69429 }
69430@@ -1613,7 +1613,7 @@ ip_vs_in(unsigned int hooknum, struct sk
69431 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
69432 pkts = sysctl_sync_threshold(ipvs);
69433 else
69434- pkts = atomic_add_return(1, &cp->in_pkts);
69435+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
69436
69437 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
69438 cp->protocol == IPPROTO_SCTP) {
69439diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c
69440--- linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c 2011-08-23 21:44:40.000000000 -0400
69441+++ linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c 2011-08-23 21:48:14.000000000 -0400
69442@@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service
69443 ip_vs_rs_hash(ipvs, dest);
69444 write_unlock_bh(&ipvs->rs_lock);
69445 }
69446- atomic_set(&dest->conn_flags, conn_flags);
69447+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
69448
69449 /* bind the service */
69450 if (!dest->svc) {
69451@@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct se
69452 " %-7s %-6d %-10d %-10d\n",
69453 &dest->addr.in6,
69454 ntohs(dest->port),
69455- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
69456+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
69457 atomic_read(&dest->weight),
69458 atomic_read(&dest->activeconns),
69459 atomic_read(&dest->inactconns));
69460@@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct se
69461 "%-7s %-6d %-10d %-10d\n",
69462 ntohl(dest->addr.ip),
69463 ntohs(dest->port),
69464- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
69465+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
69466 atomic_read(&dest->weight),
69467 atomic_read(&dest->activeconns),
69468 atomic_read(&dest->inactconns));
69469@@ -2284,6 +2284,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
69470 struct ip_vs_dest_user *udest_compat;
69471 struct ip_vs_dest_user_kern udest;
69472
69473+ pax_track_stack();
69474+
69475 if (!capable(CAP_NET_ADMIN))
69476 return -EPERM;
69477
69478@@ -2498,7 +2500,7 @@ __ip_vs_get_dest_entries(struct net *net
69479
69480 entry.addr = dest->addr.ip;
69481 entry.port = dest->port;
69482- entry.conn_flags = atomic_read(&dest->conn_flags);
69483+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
69484 entry.weight = atomic_read(&dest->weight);
69485 entry.u_threshold = dest->u_threshold;
69486 entry.l_threshold = dest->l_threshold;
69487@@ -3026,7 +3028,7 @@ static int ip_vs_genl_fill_dest(struct s
69488 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
69489
69490 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
69491- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
69492+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
69493 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
69494 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
69495 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
69496diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c
69497--- linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c 2011-07-21 22:17:23.000000000 -0400
69498+++ linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c 2011-08-23 21:47:56.000000000 -0400
69499@@ -648,7 +648,7 @@ control:
69500 * i.e only increment in_pkts for Templates.
69501 */
69502 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
69503- int pkts = atomic_add_return(1, &cp->in_pkts);
69504+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
69505
69506 if (pkts % sysctl_sync_period(ipvs) != 1)
69507 return;
69508@@ -794,7 +794,7 @@ static void ip_vs_proc_conn(struct net *
69509
69510 if (opt)
69511 memcpy(&cp->in_seq, opt, sizeof(*opt));
69512- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
69513+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
69514 cp->state = state;
69515 cp->old_state = cp->state;
69516 /*
69517diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c
69518--- linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c 2011-07-21 22:17:23.000000000 -0400
69519+++ linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c 2011-08-23 21:47:56.000000000 -0400
69520@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
69521 else
69522 rc = NF_ACCEPT;
69523 /* do not touch skb anymore */
69524- atomic_inc(&cp->in_pkts);
69525+ atomic_inc_unchecked(&cp->in_pkts);
69526 goto out;
69527 }
69528
69529@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
69530 else
69531 rc = NF_ACCEPT;
69532 /* do not touch skb anymore */
69533- atomic_inc(&cp->in_pkts);
69534+ atomic_inc_unchecked(&cp->in_pkts);
69535 goto out;
69536 }
69537
69538diff -urNp linux-3.0.4/net/netfilter/Kconfig linux-3.0.4/net/netfilter/Kconfig
69539--- linux-3.0.4/net/netfilter/Kconfig 2011-07-21 22:17:23.000000000 -0400
69540+++ linux-3.0.4/net/netfilter/Kconfig 2011-08-23 21:48:14.000000000 -0400
69541@@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
69542
69543 To compile it as a module, choose M here. If unsure, say N.
69544
69545+config NETFILTER_XT_MATCH_GRADM
69546+ tristate '"gradm" match support'
69547+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
69548+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
69549+ ---help---
69550+ The gradm match allows to match on grsecurity RBAC being enabled.
69551+ It is useful when iptables rules are applied early on bootup to
69552+ prevent connections to the machine (except from a trusted host)
69553+ while the RBAC system is disabled.
69554+
69555 config NETFILTER_XT_MATCH_HASHLIMIT
69556 tristate '"hashlimit" match support'
69557 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
69558diff -urNp linux-3.0.4/net/netfilter/Makefile linux-3.0.4/net/netfilter/Makefile
69559--- linux-3.0.4/net/netfilter/Makefile 2011-07-21 22:17:23.000000000 -0400
69560+++ linux-3.0.4/net/netfilter/Makefile 2011-08-23 21:48:14.000000000 -0400
69561@@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) +=
69562 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
69563 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
69564 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
69565+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
69566 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
69567 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
69568 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
69569diff -urNp linux-3.0.4/net/netfilter/nfnetlink_log.c linux-3.0.4/net/netfilter/nfnetlink_log.c
69570--- linux-3.0.4/net/netfilter/nfnetlink_log.c 2011-07-21 22:17:23.000000000 -0400
69571+++ linux-3.0.4/net/netfilter/nfnetlink_log.c 2011-08-23 21:47:56.000000000 -0400
69572@@ -70,7 +70,7 @@ struct nfulnl_instance {
69573 };
69574
69575 static DEFINE_SPINLOCK(instances_lock);
69576-static atomic_t global_seq;
69577+static atomic_unchecked_t global_seq;
69578
69579 #define INSTANCE_BUCKETS 16
69580 static struct hlist_head instance_table[INSTANCE_BUCKETS];
69581@@ -505,7 +505,7 @@ __build_packet_message(struct nfulnl_ins
69582 /* global sequence number */
69583 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
69584 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
69585- htonl(atomic_inc_return(&global_seq)));
69586+ htonl(atomic_inc_return_unchecked(&global_seq)));
69587
69588 if (data_len) {
69589 struct nlattr *nla;
69590diff -urNp linux-3.0.4/net/netfilter/nfnetlink_queue.c linux-3.0.4/net/netfilter/nfnetlink_queue.c
69591--- linux-3.0.4/net/netfilter/nfnetlink_queue.c 2011-07-21 22:17:23.000000000 -0400
69592+++ linux-3.0.4/net/netfilter/nfnetlink_queue.c 2011-08-23 21:47:56.000000000 -0400
69593@@ -58,7 +58,7 @@ struct nfqnl_instance {
69594 */
69595 spinlock_t lock;
69596 unsigned int queue_total;
69597- atomic_t id_sequence; /* 'sequence' of pkt ids */
69598+ atomic_unchecked_t id_sequence; /* 'sequence' of pkt ids */
69599 struct list_head queue_list; /* packets in queue */
69600 };
69601
69602@@ -272,7 +272,7 @@ nfqnl_build_packet_message(struct nfqnl_
69603 nfmsg->version = NFNETLINK_V0;
69604 nfmsg->res_id = htons(queue->queue_num);
69605
69606- entry->id = atomic_inc_return(&queue->id_sequence);
69607+ entry->id = atomic_inc_return_unchecked(&queue->id_sequence);
69608 pmsg.packet_id = htonl(entry->id);
69609 pmsg.hw_protocol = entskb->protocol;
69610 pmsg.hook = entry->hook;
69611@@ -870,7 +870,7 @@ static int seq_show(struct seq_file *s,
69612 inst->peer_pid, inst->queue_total,
69613 inst->copy_mode, inst->copy_range,
69614 inst->queue_dropped, inst->queue_user_dropped,
69615- atomic_read(&inst->id_sequence), 1);
69616+ atomic_read_unchecked(&inst->id_sequence), 1);
69617 }
69618
69619 static const struct seq_operations nfqnl_seq_ops = {
69620diff -urNp linux-3.0.4/net/netfilter/xt_gradm.c linux-3.0.4/net/netfilter/xt_gradm.c
69621--- linux-3.0.4/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
69622+++ linux-3.0.4/net/netfilter/xt_gradm.c 2011-08-23 21:48:14.000000000 -0400
69623@@ -0,0 +1,51 @@
69624+/*
69625+ * gradm match for netfilter
69626