]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.2.2-3.0.4-201109170101.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-3.0.4-201109170101.patch
CommitLineData
c3d28382
PK
1diff -urNp linux-3.0.4/arch/alpha/include/asm/elf.h linux-3.0.4/arch/alpha/include/asm/elf.h
2--- linux-3.0.4/arch/alpha/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
3+++ linux-3.0.4/arch/alpha/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
4@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8+#ifdef CONFIG_PAX_ASLR
9+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10+
11+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13+#endif
14+
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18diff -urNp linux-3.0.4/arch/alpha/include/asm/pgtable.h linux-3.0.4/arch/alpha/include/asm/pgtable.h
19--- linux-3.0.4/arch/alpha/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
20+++ linux-3.0.4/arch/alpha/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
21@@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25+
26+#ifdef CONFIG_PAX_PAGEEXEC
27+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30+#else
31+# define PAGE_SHARED_NOEXEC PAGE_SHARED
32+# define PAGE_COPY_NOEXEC PAGE_COPY
33+# define PAGE_READONLY_NOEXEC PAGE_READONLY
34+#endif
35+
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39diff -urNp linux-3.0.4/arch/alpha/kernel/module.c linux-3.0.4/arch/alpha/kernel/module.c
40--- linux-3.0.4/arch/alpha/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
41+++ linux-3.0.4/arch/alpha/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
42@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46- gp = (u64)me->module_core + me->core_size - 0x8000;
47+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51diff -urNp linux-3.0.4/arch/alpha/kernel/osf_sys.c linux-3.0.4/arch/alpha/kernel/osf_sys.c
52--- linux-3.0.4/arch/alpha/kernel/osf_sys.c 2011-07-21 22:17:23.000000000 -0400
53+++ linux-3.0.4/arch/alpha/kernel/osf_sys.c 2011-08-23 21:47:55.000000000 -0400
54@@ -1145,7 +1145,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58- if (!vma || addr + len <= vma->vm_start)
59+ if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63@@ -1181,6 +1181,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67+#ifdef CONFIG_PAX_RANDMMAP
68+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69+#endif
70+
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74@@ -1188,8 +1192,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79- len, limit);
80+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81+
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85diff -urNp linux-3.0.4/arch/alpha/mm/fault.c linux-3.0.4/arch/alpha/mm/fault.c
86--- linux-3.0.4/arch/alpha/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
87+++ linux-3.0.4/arch/alpha/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
88@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92+#ifdef CONFIG_PAX_PAGEEXEC
93+/*
94+ * PaX: decide what to do with offenders (regs->pc = fault address)
95+ *
96+ * returns 1 when task should be killed
97+ * 2 when patched PLT trampoline was detected
98+ * 3 when unpatched PLT trampoline was detected
99+ */
100+static int pax_handle_fetch_fault(struct pt_regs *regs)
101+{
102+
103+#ifdef CONFIG_PAX_EMUPLT
104+ int err;
105+
106+ do { /* PaX: patched PLT emulation #1 */
107+ unsigned int ldah, ldq, jmp;
108+
109+ err = get_user(ldah, (unsigned int *)regs->pc);
110+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112+
113+ if (err)
114+ break;
115+
116+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118+ jmp == 0x6BFB0000U)
119+ {
120+ unsigned long r27, addr;
121+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123+
124+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125+ err = get_user(r27, (unsigned long *)addr);
126+ if (err)
127+ break;
128+
129+ regs->r27 = r27;
130+ regs->pc = r27;
131+ return 2;
132+ }
133+ } while (0);
134+
135+ do { /* PaX: patched PLT emulation #2 */
136+ unsigned int ldah, lda, br;
137+
138+ err = get_user(ldah, (unsigned int *)regs->pc);
139+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
140+ err |= get_user(br, (unsigned int *)(regs->pc+8));
141+
142+ if (err)
143+ break;
144+
145+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
147+ (br & 0xFFE00000U) == 0xC3E00000U)
148+ {
149+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152+
153+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155+ return 2;
156+ }
157+ } while (0);
158+
159+ do { /* PaX: unpatched PLT emulation */
160+ unsigned int br;
161+
162+ err = get_user(br, (unsigned int *)regs->pc);
163+
164+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165+ unsigned int br2, ldq, nop, jmp;
166+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167+
168+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169+ err = get_user(br2, (unsigned int *)addr);
170+ err |= get_user(ldq, (unsigned int *)(addr+4));
171+ err |= get_user(nop, (unsigned int *)(addr+8));
172+ err |= get_user(jmp, (unsigned int *)(addr+12));
173+ err |= get_user(resolver, (unsigned long *)(addr+16));
174+
175+ if (err)
176+ break;
177+
178+ if (br2 == 0xC3600000U &&
179+ ldq == 0xA77B000CU &&
180+ nop == 0x47FF041FU &&
181+ jmp == 0x6B7B0000U)
182+ {
183+ regs->r28 = regs->pc+4;
184+ regs->r27 = addr+16;
185+ regs->pc = resolver;
186+ return 3;
187+ }
188+ }
189+ } while (0);
190+#endif
191+
192+ return 1;
193+}
194+
195+void pax_report_insns(void *pc, void *sp)
196+{
197+ unsigned long i;
198+
199+ printk(KERN_ERR "PAX: bytes at PC: ");
200+ for (i = 0; i < 5; i++) {
201+ unsigned int c;
202+ if (get_user(c, (unsigned int *)pc+i))
203+ printk(KERN_CONT "???????? ");
204+ else
205+ printk(KERN_CONT "%08x ", c);
206+ }
207+ printk("\n");
208+}
209+#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217- if (!(vma->vm_flags & VM_EXEC))
218+ if (!(vma->vm_flags & VM_EXEC)) {
219+
220+#ifdef CONFIG_PAX_PAGEEXEC
221+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222+ goto bad_area;
223+
224+ up_read(&mm->mmap_sem);
225+ switch (pax_handle_fetch_fault(regs)) {
226+
227+#ifdef CONFIG_PAX_EMUPLT
228+ case 2:
229+ case 3:
230+ return;
231+#endif
232+
233+ }
234+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235+ do_group_exit(SIGKILL);
236+#else
237 goto bad_area;
238+#endif
239+
240+ }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244diff -urNp linux-3.0.4/arch/arm/include/asm/elf.h linux-3.0.4/arch/arm/include/asm/elf.h
245--- linux-3.0.4/arch/arm/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
246+++ linux-3.0.4/arch/arm/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
247@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253+
254+#ifdef CONFIG_PAX_ASLR
255+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256+
257+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259+#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t
264 extern void elf_set_personality(const struct elf32_hdr *);
265 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
266
267-struct mm_struct;
268-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
269-#define arch_randomize_brk arch_randomize_brk
270-
271 extern int vectors_user_mapping(void);
272 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
273 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
274diff -urNp linux-3.0.4/arch/arm/include/asm/kmap_types.h linux-3.0.4/arch/arm/include/asm/kmap_types.h
275--- linux-3.0.4/arch/arm/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
276+++ linux-3.0.4/arch/arm/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
277@@ -21,6 +21,7 @@ enum km_type {
278 KM_L1_CACHE,
279 KM_L2_CACHE,
280 KM_KDB,
281+ KM_CLEARPAGE,
282 KM_TYPE_NR
283 };
284
285diff -urNp linux-3.0.4/arch/arm/include/asm/uaccess.h linux-3.0.4/arch/arm/include/asm/uaccess.h
286--- linux-3.0.4/arch/arm/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
287+++ linux-3.0.4/arch/arm/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
288@@ -22,6 +22,8 @@
289 #define VERIFY_READ 0
290 #define VERIFY_WRITE 1
291
292+extern void check_object_size(const void *ptr, unsigned long n, bool to);
293+
294 /*
295 * The exception table consists of pairs of addresses: the first is the
296 * address of an instruction that is allowed to fault, and the second is
297@@ -387,8 +389,23 @@ do { \
298
299
300 #ifdef CONFIG_MMU
301-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
302-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
303+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
304+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
305+
306+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
307+{
308+ if (!__builtin_constant_p(n))
309+ check_object_size(to, n, false);
310+ return ___copy_from_user(to, from, n);
311+}
312+
313+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
314+{
315+ if (!__builtin_constant_p(n))
316+ check_object_size(from, n, true);
317+ return ___copy_to_user(to, from, n);
318+}
319+
320 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
321 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
322 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
323@@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
324
325 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
326 {
327+ if ((long)n < 0)
328+ return n;
329+
330 if (access_ok(VERIFY_READ, from, n))
331 n = __copy_from_user(to, from, n);
332 else /* security hole - plug it */
333@@ -412,6 +432,9 @@ static inline unsigned long __must_check
334
335 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
336 {
337+ if ((long)n < 0)
338+ return n;
339+
340 if (access_ok(VERIFY_WRITE, to, n))
341 n = __copy_to_user(to, from, n);
342 return n;
343diff -urNp linux-3.0.4/arch/arm/kernel/armksyms.c linux-3.0.4/arch/arm/kernel/armksyms.c
344--- linux-3.0.4/arch/arm/kernel/armksyms.c 2011-07-21 22:17:23.000000000 -0400
345+++ linux-3.0.4/arch/arm/kernel/armksyms.c 2011-08-23 21:47:55.000000000 -0400
346@@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
347 #ifdef CONFIG_MMU
348 EXPORT_SYMBOL(copy_page);
349
350-EXPORT_SYMBOL(__copy_from_user);
351-EXPORT_SYMBOL(__copy_to_user);
352+EXPORT_SYMBOL(___copy_from_user);
353+EXPORT_SYMBOL(___copy_to_user);
354 EXPORT_SYMBOL(__clear_user);
355
356 EXPORT_SYMBOL(__get_user_1);
357diff -urNp linux-3.0.4/arch/arm/kernel/process.c linux-3.0.4/arch/arm/kernel/process.c
358--- linux-3.0.4/arch/arm/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
359+++ linux-3.0.4/arch/arm/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
360@@ -28,7 +28,6 @@
361 #include <linux/tick.h>
362 #include <linux/utsname.h>
363 #include <linux/uaccess.h>
364-#include <linux/random.h>
365 #include <linux/hw_breakpoint.h>
366
367 #include <asm/cacheflush.h>
368@@ -479,12 +478,6 @@ unsigned long get_wchan(struct task_stru
369 return 0;
370 }
371
372-unsigned long arch_randomize_brk(struct mm_struct *mm)
373-{
374- unsigned long range_end = mm->brk + 0x02000000;
375- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
376-}
377-
378 #ifdef CONFIG_MMU
379 /*
380 * The vectors page is always readable from user space for the
381diff -urNp linux-3.0.4/arch/arm/kernel/traps.c linux-3.0.4/arch/arm/kernel/traps.c
382--- linux-3.0.4/arch/arm/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
383+++ linux-3.0.4/arch/arm/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
384@@ -257,6 +257,8 @@ static int __die(const char *str, int er
385
386 static DEFINE_SPINLOCK(die_lock);
387
388+extern void gr_handle_kernel_exploit(void);
389+
390 /*
391 * This function is protected against re-entrancy.
392 */
393@@ -284,6 +286,9 @@ void die(const char *str, struct pt_regs
394 panic("Fatal exception in interrupt");
395 if (panic_on_oops)
396 panic("Fatal exception");
397+
398+ gr_handle_kernel_exploit();
399+
400 if (ret != NOTIFY_STOP)
401 do_exit(SIGSEGV);
402 }
403diff -urNp linux-3.0.4/arch/arm/lib/copy_from_user.S linux-3.0.4/arch/arm/lib/copy_from_user.S
404--- linux-3.0.4/arch/arm/lib/copy_from_user.S 2011-07-21 22:17:23.000000000 -0400
405+++ linux-3.0.4/arch/arm/lib/copy_from_user.S 2011-08-23 21:47:55.000000000 -0400
406@@ -16,7 +16,7 @@
407 /*
408 * Prototype:
409 *
410- * size_t __copy_from_user(void *to, const void *from, size_t n)
411+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
412 *
413 * Purpose:
414 *
415@@ -84,11 +84,11 @@
416
417 .text
418
419-ENTRY(__copy_from_user)
420+ENTRY(___copy_from_user)
421
422 #include "copy_template.S"
423
424-ENDPROC(__copy_from_user)
425+ENDPROC(___copy_from_user)
426
427 .pushsection .fixup,"ax"
428 .align 0
429diff -urNp linux-3.0.4/arch/arm/lib/copy_to_user.S linux-3.0.4/arch/arm/lib/copy_to_user.S
430--- linux-3.0.4/arch/arm/lib/copy_to_user.S 2011-07-21 22:17:23.000000000 -0400
431+++ linux-3.0.4/arch/arm/lib/copy_to_user.S 2011-08-23 21:47:55.000000000 -0400
432@@ -16,7 +16,7 @@
433 /*
434 * Prototype:
435 *
436- * size_t __copy_to_user(void *to, const void *from, size_t n)
437+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
438 *
439 * Purpose:
440 *
441@@ -88,11 +88,11 @@
442 .text
443
444 ENTRY(__copy_to_user_std)
445-WEAK(__copy_to_user)
446+WEAK(___copy_to_user)
447
448 #include "copy_template.S"
449
450-ENDPROC(__copy_to_user)
451+ENDPROC(___copy_to_user)
452 ENDPROC(__copy_to_user_std)
453
454 .pushsection .fixup,"ax"
455diff -urNp linux-3.0.4/arch/arm/lib/uaccess.S linux-3.0.4/arch/arm/lib/uaccess.S
456--- linux-3.0.4/arch/arm/lib/uaccess.S 2011-07-21 22:17:23.000000000 -0400
457+++ linux-3.0.4/arch/arm/lib/uaccess.S 2011-08-23 21:47:55.000000000 -0400
458@@ -20,7 +20,7 @@
459
460 #define PAGE_SHIFT 12
461
462-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
463+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
464 * Purpose : copy a block to user memory from kernel memory
465 * Params : to - user memory
466 * : from - kernel memory
467@@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May f
468 sub r2, r2, ip
469 b .Lc2u_dest_aligned
470
471-ENTRY(__copy_to_user)
472+ENTRY(___copy_to_user)
473 stmfd sp!, {r2, r4 - r7, lr}
474 cmp r2, #4
475 blt .Lc2u_not_enough
476@@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May f
477 ldrgtb r3, [r1], #0
478 USER( T(strgtb) r3, [r0], #1) @ May fault
479 b .Lc2u_finished
480-ENDPROC(__copy_to_user)
481+ENDPROC(___copy_to_user)
482
483 .pushsection .fixup,"ax"
484 .align 0
485 9001: ldmfd sp!, {r0, r4 - r7, pc}
486 .popsection
487
488-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
489+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
490 * Purpose : copy a block from user memory to kernel memory
491 * Params : to - kernel memory
492 * : from - user memory
493@@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May f
494 sub r2, r2, ip
495 b .Lcfu_dest_aligned
496
497-ENTRY(__copy_from_user)
498+ENTRY(___copy_from_user)
499 stmfd sp!, {r0, r2, r4 - r7, lr}
500 cmp r2, #4
501 blt .Lcfu_not_enough
502@@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May f
503 USER( T(ldrgtb) r3, [r1], #1) @ May fault
504 strgtb r3, [r0], #1
505 b .Lcfu_finished
506-ENDPROC(__copy_from_user)
507+ENDPROC(___copy_from_user)
508
509 .pushsection .fixup,"ax"
510 .align 0
511diff -urNp linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c
512--- linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c 2011-07-21 22:17:23.000000000 -0400
513+++ linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c 2011-08-23 21:47:55.000000000 -0400
514@@ -103,7 +103,7 @@ out:
515 }
516
517 unsigned long
518-__copy_to_user(void __user *to, const void *from, unsigned long n)
519+___copy_to_user(void __user *to, const void *from, unsigned long n)
520 {
521 /*
522 * This test is stubbed out of the main function above to keep
523diff -urNp linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c
524--- linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c 2011-07-21 22:17:23.000000000 -0400
525+++ linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c 2011-08-23 21:48:14.000000000 -0400
526@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct dev
527 return sprintf(buf, "0x%X\n", mbox_value);
528 }
529
530-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
531+static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
532
533 static int mbox_show(struct seq_file *s, void *data)
534 {
535diff -urNp linux-3.0.4/arch/arm/mm/fault.c linux-3.0.4/arch/arm/mm/fault.c
536--- linux-3.0.4/arch/arm/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
537+++ linux-3.0.4/arch/arm/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
538@@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk,
539 }
540 #endif
541
542+#ifdef CONFIG_PAX_PAGEEXEC
543+ if (fsr & FSR_LNX_PF) {
544+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
545+ do_group_exit(SIGKILL);
546+ }
547+#endif
548+
549 tsk->thread.address = addr;
550 tsk->thread.error_code = fsr;
551 tsk->thread.trap_no = 14;
552@@ -379,6 +386,33 @@ do_page_fault(unsigned long addr, unsign
553 }
554 #endif /* CONFIG_MMU */
555
556+#ifdef CONFIG_PAX_PAGEEXEC
557+void pax_report_insns(void *pc, void *sp)
558+{
559+ long i;
560+
561+ printk(KERN_ERR "PAX: bytes at PC: ");
562+ for (i = 0; i < 20; i++) {
563+ unsigned char c;
564+ if (get_user(c, (__force unsigned char __user *)pc+i))
565+ printk(KERN_CONT "?? ");
566+ else
567+ printk(KERN_CONT "%02x ", c);
568+ }
569+ printk("\n");
570+
571+ printk(KERN_ERR "PAX: bytes at SP-4: ");
572+ for (i = -1; i < 20; i++) {
573+ unsigned long c;
574+ if (get_user(c, (__force unsigned long __user *)sp+i))
575+ printk(KERN_CONT "???????? ");
576+ else
577+ printk(KERN_CONT "%08lx ", c);
578+ }
579+ printk("\n");
580+}
581+#endif
582+
583 /*
584 * First Level Translation Fault Handler
585 *
586diff -urNp linux-3.0.4/arch/arm/mm/mmap.c linux-3.0.4/arch/arm/mm/mmap.c
587--- linux-3.0.4/arch/arm/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
588+++ linux-3.0.4/arch/arm/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
589@@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp
590 if (len > TASK_SIZE)
591 return -ENOMEM;
592
593+#ifdef CONFIG_PAX_RANDMMAP
594+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
595+#endif
596+
597 if (addr) {
598 if (do_align)
599 addr = COLOUR_ALIGN(addr, pgoff);
600@@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp
601 addr = PAGE_ALIGN(addr);
602
603 vma = find_vma(mm, addr);
604- if (TASK_SIZE - len >= addr &&
605- (!vma || addr + len <= vma->vm_start))
606+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
607 return addr;
608 }
609 if (len > mm->cached_hole_size) {
610- start_addr = addr = mm->free_area_cache;
611+ start_addr = addr = mm->free_area_cache;
612 } else {
613- start_addr = addr = TASK_UNMAPPED_BASE;
614- mm->cached_hole_size = 0;
615+ start_addr = addr = mm->mmap_base;
616+ mm->cached_hole_size = 0;
617 }
618 /* 8 bits of randomness in 20 address space bits */
619 if ((current->flags & PF_RANDOMIZE) &&
620@@ -100,14 +103,14 @@ full_search:
621 * Start a new search - just in case we missed
622 * some holes.
623 */
624- if (start_addr != TASK_UNMAPPED_BASE) {
625- start_addr = addr = TASK_UNMAPPED_BASE;
626+ if (start_addr != mm->mmap_base) {
627+ start_addr = addr = mm->mmap_base;
628 mm->cached_hole_size = 0;
629 goto full_search;
630 }
631 return -ENOMEM;
632 }
633- if (!vma || addr + len <= vma->vm_start) {
634+ if (check_heap_stack_gap(vma, addr, len)) {
635 /*
636 * Remember the place where we stopped the search:
637 */
638diff -urNp linux-3.0.4/arch/avr32/include/asm/elf.h linux-3.0.4/arch/avr32/include/asm/elf.h
639--- linux-3.0.4/arch/avr32/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
640+++ linux-3.0.4/arch/avr32/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
641@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
642 the loader. We need to make sure that it is out of the way of the program
643 that it will "exec", and that there is sufficient room for the brk. */
644
645-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
646+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
647
648+#ifdef CONFIG_PAX_ASLR
649+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
650+
651+#define PAX_DELTA_MMAP_LEN 15
652+#define PAX_DELTA_STACK_LEN 15
653+#endif
654
655 /* This yields a mask that user programs can use to figure out what
656 instruction set this CPU supports. This could be done in user space,
657diff -urNp linux-3.0.4/arch/avr32/include/asm/kmap_types.h linux-3.0.4/arch/avr32/include/asm/kmap_types.h
658--- linux-3.0.4/arch/avr32/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
659+++ linux-3.0.4/arch/avr32/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
660@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
661 D(11) KM_IRQ1,
662 D(12) KM_SOFTIRQ0,
663 D(13) KM_SOFTIRQ1,
664-D(14) KM_TYPE_NR
665+D(14) KM_CLEARPAGE,
666+D(15) KM_TYPE_NR
667 };
668
669 #undef D
670diff -urNp linux-3.0.4/arch/avr32/mm/fault.c linux-3.0.4/arch/avr32/mm/fault.c
671--- linux-3.0.4/arch/avr32/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
672+++ linux-3.0.4/arch/avr32/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
673@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
674
675 int exception_trace = 1;
676
677+#ifdef CONFIG_PAX_PAGEEXEC
678+void pax_report_insns(void *pc, void *sp)
679+{
680+ unsigned long i;
681+
682+ printk(KERN_ERR "PAX: bytes at PC: ");
683+ for (i = 0; i < 20; i++) {
684+ unsigned char c;
685+ if (get_user(c, (unsigned char *)pc+i))
686+ printk(KERN_CONT "???????? ");
687+ else
688+ printk(KERN_CONT "%02x ", c);
689+ }
690+ printk("\n");
691+}
692+#endif
693+
694 /*
695 * This routine handles page faults. It determines the address and the
696 * problem, and then passes it off to one of the appropriate routines.
697@@ -156,6 +173,16 @@ bad_area:
698 up_read(&mm->mmap_sem);
699
700 if (user_mode(regs)) {
701+
702+#ifdef CONFIG_PAX_PAGEEXEC
703+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
704+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
705+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
706+ do_group_exit(SIGKILL);
707+ }
708+ }
709+#endif
710+
711 if (exception_trace && printk_ratelimit())
712 printk("%s%s[%d]: segfault at %08lx pc %08lx "
713 "sp %08lx ecr %lu\n",
714diff -urNp linux-3.0.4/arch/frv/include/asm/kmap_types.h linux-3.0.4/arch/frv/include/asm/kmap_types.h
715--- linux-3.0.4/arch/frv/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
716+++ linux-3.0.4/arch/frv/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
717@@ -23,6 +23,7 @@ enum km_type {
718 KM_IRQ1,
719 KM_SOFTIRQ0,
720 KM_SOFTIRQ1,
721+ KM_CLEARPAGE,
722 KM_TYPE_NR
723 };
724
725diff -urNp linux-3.0.4/arch/frv/mm/elf-fdpic.c linux-3.0.4/arch/frv/mm/elf-fdpic.c
726--- linux-3.0.4/arch/frv/mm/elf-fdpic.c 2011-07-21 22:17:23.000000000 -0400
727+++ linux-3.0.4/arch/frv/mm/elf-fdpic.c 2011-08-23 21:47:55.000000000 -0400
728@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
729 if (addr) {
730 addr = PAGE_ALIGN(addr);
731 vma = find_vma(current->mm, addr);
732- if (TASK_SIZE - len >= addr &&
733- (!vma || addr + len <= vma->vm_start))
734+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
735 goto success;
736 }
737
738@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
739 for (; vma; vma = vma->vm_next) {
740 if (addr > limit)
741 break;
742- if (addr + len <= vma->vm_start)
743+ if (check_heap_stack_gap(vma, addr, len))
744 goto success;
745 addr = vma->vm_end;
746 }
747@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
748 for (; vma; vma = vma->vm_next) {
749 if (addr > limit)
750 break;
751- if (addr + len <= vma->vm_start)
752+ if (check_heap_stack_gap(vma, addr, len))
753 goto success;
754 addr = vma->vm_end;
755 }
756diff -urNp linux-3.0.4/arch/ia64/include/asm/elf.h linux-3.0.4/arch/ia64/include/asm/elf.h
757--- linux-3.0.4/arch/ia64/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
758+++ linux-3.0.4/arch/ia64/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
759@@ -42,6 +42,13 @@
760 */
761 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
762
763+#ifdef CONFIG_PAX_ASLR
764+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
765+
766+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
767+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
768+#endif
769+
770 #define PT_IA_64_UNWIND 0x70000001
771
772 /* IA-64 relocations: */
773diff -urNp linux-3.0.4/arch/ia64/include/asm/pgtable.h linux-3.0.4/arch/ia64/include/asm/pgtable.h
774--- linux-3.0.4/arch/ia64/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
775+++ linux-3.0.4/arch/ia64/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
776@@ -12,7 +12,7 @@
777 * David Mosberger-Tang <davidm@hpl.hp.com>
778 */
779
780-
781+#include <linux/const.h>
782 #include <asm/mman.h>
783 #include <asm/page.h>
784 #include <asm/processor.h>
785@@ -143,6 +143,17 @@
786 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
787 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
788 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
789+
790+#ifdef CONFIG_PAX_PAGEEXEC
791+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
792+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
793+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
794+#else
795+# define PAGE_SHARED_NOEXEC PAGE_SHARED
796+# define PAGE_READONLY_NOEXEC PAGE_READONLY
797+# define PAGE_COPY_NOEXEC PAGE_COPY
798+#endif
799+
800 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
801 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
802 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
803diff -urNp linux-3.0.4/arch/ia64/include/asm/spinlock.h linux-3.0.4/arch/ia64/include/asm/spinlock.h
804--- linux-3.0.4/arch/ia64/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
805+++ linux-3.0.4/arch/ia64/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
806@@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
807 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
808
809 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
810- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
811+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
812 }
813
814 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
815diff -urNp linux-3.0.4/arch/ia64/include/asm/uaccess.h linux-3.0.4/arch/ia64/include/asm/uaccess.h
816--- linux-3.0.4/arch/ia64/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
817+++ linux-3.0.4/arch/ia64/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
818@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
819 const void *__cu_from = (from); \
820 long __cu_len = (n); \
821 \
822- if (__access_ok(__cu_to, __cu_len, get_fs())) \
823+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
824 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
825 __cu_len; \
826 })
827@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
828 long __cu_len = (n); \
829 \
830 __chk_user_ptr(__cu_from); \
831- if (__access_ok(__cu_from, __cu_len, get_fs())) \
832+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
833 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
834 __cu_len; \
835 })
836diff -urNp linux-3.0.4/arch/ia64/kernel/module.c linux-3.0.4/arch/ia64/kernel/module.c
837--- linux-3.0.4/arch/ia64/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
838+++ linux-3.0.4/arch/ia64/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
839@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
840 void
841 module_free (struct module *mod, void *module_region)
842 {
843- if (mod && mod->arch.init_unw_table &&
844- module_region == mod->module_init) {
845+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
846 unw_remove_unwind_table(mod->arch.init_unw_table);
847 mod->arch.init_unw_table = NULL;
848 }
849@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
850 }
851
852 static inline int
853+in_init_rx (const struct module *mod, uint64_t addr)
854+{
855+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
856+}
857+
858+static inline int
859+in_init_rw (const struct module *mod, uint64_t addr)
860+{
861+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
862+}
863+
864+static inline int
865 in_init (const struct module *mod, uint64_t addr)
866 {
867- return addr - (uint64_t) mod->module_init < mod->init_size;
868+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
869+}
870+
871+static inline int
872+in_core_rx (const struct module *mod, uint64_t addr)
873+{
874+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
875+}
876+
877+static inline int
878+in_core_rw (const struct module *mod, uint64_t addr)
879+{
880+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
881 }
882
883 static inline int
884 in_core (const struct module *mod, uint64_t addr)
885 {
886- return addr - (uint64_t) mod->module_core < mod->core_size;
887+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
888 }
889
890 static inline int
891@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
892 break;
893
894 case RV_BDREL:
895- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
896+ if (in_init_rx(mod, val))
897+ val -= (uint64_t) mod->module_init_rx;
898+ else if (in_init_rw(mod, val))
899+ val -= (uint64_t) mod->module_init_rw;
900+ else if (in_core_rx(mod, val))
901+ val -= (uint64_t) mod->module_core_rx;
902+ else if (in_core_rw(mod, val))
903+ val -= (uint64_t) mod->module_core_rw;
904 break;
905
906 case RV_LTV:
907@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
908 * addresses have been selected...
909 */
910 uint64_t gp;
911- if (mod->core_size > MAX_LTOFF)
912+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
913 /*
914 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
915 * at the end of the module.
916 */
917- gp = mod->core_size - MAX_LTOFF / 2;
918+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
919 else
920- gp = mod->core_size / 2;
921- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
922+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
923+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
924 mod->arch.gp = gp;
925 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
926 }
927diff -urNp linux-3.0.4/arch/ia64/kernel/sys_ia64.c linux-3.0.4/arch/ia64/kernel/sys_ia64.c
928--- linux-3.0.4/arch/ia64/kernel/sys_ia64.c 2011-07-21 22:17:23.000000000 -0400
929+++ linux-3.0.4/arch/ia64/kernel/sys_ia64.c 2011-08-23 21:47:55.000000000 -0400
930@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
931 if (REGION_NUMBER(addr) == RGN_HPAGE)
932 addr = 0;
933 #endif
934+
935+#ifdef CONFIG_PAX_RANDMMAP
936+ if (mm->pax_flags & MF_PAX_RANDMMAP)
937+ addr = mm->free_area_cache;
938+ else
939+#endif
940+
941 if (!addr)
942 addr = mm->free_area_cache;
943
944@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
945 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
946 /* At this point: (!vma || addr < vma->vm_end). */
947 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
948- if (start_addr != TASK_UNMAPPED_BASE) {
949+ if (start_addr != mm->mmap_base) {
950 /* Start a new search --- just in case we missed some holes. */
951- addr = TASK_UNMAPPED_BASE;
952+ addr = mm->mmap_base;
953 goto full_search;
954 }
955 return -ENOMEM;
956 }
957- if (!vma || addr + len <= vma->vm_start) {
958+ if (check_heap_stack_gap(vma, addr, len)) {
959 /* Remember the address where we stopped this search: */
960 mm->free_area_cache = addr + len;
961 return addr;
962diff -urNp linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S
963--- linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
964+++ linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
965@@ -199,7 +199,7 @@ SECTIONS {
966 /* Per-cpu data: */
967 . = ALIGN(PERCPU_PAGE_SIZE);
968 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
969- __phys_per_cpu_start = __per_cpu_load;
970+ __phys_per_cpu_start = per_cpu_load;
971 /*
972 * ensure percpu data fits
973 * into percpu page size
974diff -urNp linux-3.0.4/arch/ia64/mm/fault.c linux-3.0.4/arch/ia64/mm/fault.c
975--- linux-3.0.4/arch/ia64/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
976+++ linux-3.0.4/arch/ia64/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
977@@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned
978 return pte_present(pte);
979 }
980
981+#ifdef CONFIG_PAX_PAGEEXEC
982+void pax_report_insns(void *pc, void *sp)
983+{
984+ unsigned long i;
985+
986+ printk(KERN_ERR "PAX: bytes at PC: ");
987+ for (i = 0; i < 8; i++) {
988+ unsigned int c;
989+ if (get_user(c, (unsigned int *)pc+i))
990+ printk(KERN_CONT "???????? ");
991+ else
992+ printk(KERN_CONT "%08x ", c);
993+ }
994+ printk("\n");
995+}
996+#endif
997+
998 void __kprobes
999 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1000 {
1001@@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long addres
1002 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1003 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1004
1005- if ((vma->vm_flags & mask) != mask)
1006+ if ((vma->vm_flags & mask) != mask) {
1007+
1008+#ifdef CONFIG_PAX_PAGEEXEC
1009+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1010+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1011+ goto bad_area;
1012+
1013+ up_read(&mm->mmap_sem);
1014+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1015+ do_group_exit(SIGKILL);
1016+ }
1017+#endif
1018+
1019 goto bad_area;
1020
1021+ }
1022+
1023 /*
1024 * If for any reason at all we couldn't handle the fault, make
1025 * sure we exit gracefully rather than endlessly redo the
1026diff -urNp linux-3.0.4/arch/ia64/mm/hugetlbpage.c linux-3.0.4/arch/ia64/mm/hugetlbpage.c
1027--- linux-3.0.4/arch/ia64/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
1028+++ linux-3.0.4/arch/ia64/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
1029@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
1030 /* At this point: (!vmm || addr < vmm->vm_end). */
1031 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1032 return -ENOMEM;
1033- if (!vmm || (addr + len) <= vmm->vm_start)
1034+ if (check_heap_stack_gap(vmm, addr, len))
1035 return addr;
1036 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1037 }
1038diff -urNp linux-3.0.4/arch/ia64/mm/init.c linux-3.0.4/arch/ia64/mm/init.c
1039--- linux-3.0.4/arch/ia64/mm/init.c 2011-07-21 22:17:23.000000000 -0400
1040+++ linux-3.0.4/arch/ia64/mm/init.c 2011-08-23 21:47:55.000000000 -0400
1041@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1042 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1043 vma->vm_end = vma->vm_start + PAGE_SIZE;
1044 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1045+
1046+#ifdef CONFIG_PAX_PAGEEXEC
1047+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1048+ vma->vm_flags &= ~VM_EXEC;
1049+
1050+#ifdef CONFIG_PAX_MPROTECT
1051+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1052+ vma->vm_flags &= ~VM_MAYEXEC;
1053+#endif
1054+
1055+ }
1056+#endif
1057+
1058 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1059 down_write(&current->mm->mmap_sem);
1060 if (insert_vm_struct(current->mm, vma)) {
1061diff -urNp linux-3.0.4/arch/m32r/lib/usercopy.c linux-3.0.4/arch/m32r/lib/usercopy.c
1062--- linux-3.0.4/arch/m32r/lib/usercopy.c 2011-07-21 22:17:23.000000000 -0400
1063+++ linux-3.0.4/arch/m32r/lib/usercopy.c 2011-08-23 21:47:55.000000000 -0400
1064@@ -14,6 +14,9 @@
1065 unsigned long
1066 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1067 {
1068+ if ((long)n < 0)
1069+ return n;
1070+
1071 prefetch(from);
1072 if (access_ok(VERIFY_WRITE, to, n))
1073 __copy_user(to,from,n);
1074@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1075 unsigned long
1076 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1077 {
1078+ if ((long)n < 0)
1079+ return n;
1080+
1081 prefetchw(to);
1082 if (access_ok(VERIFY_READ, from, n))
1083 __copy_user_zeroing(to,from,n);
1084diff -urNp linux-3.0.4/arch/mips/include/asm/elf.h linux-3.0.4/arch/mips/include/asm/elf.h
1085--- linux-3.0.4/arch/mips/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1086+++ linux-3.0.4/arch/mips/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1087@@ -372,13 +372,16 @@ extern const char *__elf_platform;
1088 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1089 #endif
1090
1091+#ifdef CONFIG_PAX_ASLR
1092+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1093+
1094+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1095+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1096+#endif
1097+
1098 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1099 struct linux_binprm;
1100 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1101 int uses_interp);
1102
1103-struct mm_struct;
1104-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1105-#define arch_randomize_brk arch_randomize_brk
1106-
1107 #endif /* _ASM_ELF_H */
1108diff -urNp linux-3.0.4/arch/mips/include/asm/page.h linux-3.0.4/arch/mips/include/asm/page.h
1109--- linux-3.0.4/arch/mips/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1110+++ linux-3.0.4/arch/mips/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1111@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1112 #ifdef CONFIG_CPU_MIPS32
1113 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1114 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1115- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1116+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1117 #else
1118 typedef struct { unsigned long long pte; } pte_t;
1119 #define pte_val(x) ((x).pte)
1120diff -urNp linux-3.0.4/arch/mips/include/asm/system.h linux-3.0.4/arch/mips/include/asm/system.h
1121--- linux-3.0.4/arch/mips/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1122+++ linux-3.0.4/arch/mips/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1123@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1124 */
1125 #define __ARCH_WANT_UNLOCKED_CTXSW
1126
1127-extern unsigned long arch_align_stack(unsigned long sp);
1128+#define arch_align_stack(x) ((x) & ~0xfUL)
1129
1130 #endif /* _ASM_SYSTEM_H */
1131diff -urNp linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c
1132--- linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c 2011-07-21 22:17:23.000000000 -0400
1133+++ linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c 2011-08-23 21:47:55.000000000 -0400
1134@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1135 #undef ELF_ET_DYN_BASE
1136 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1137
1138+#ifdef CONFIG_PAX_ASLR
1139+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1140+
1141+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1142+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1143+#endif
1144+
1145 #include <asm/processor.h>
1146 #include <linux/module.h>
1147 #include <linux/elfcore.h>
1148diff -urNp linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c
1149--- linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c 2011-07-21 22:17:23.000000000 -0400
1150+++ linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c 2011-08-23 21:47:55.000000000 -0400
1151@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1152 #undef ELF_ET_DYN_BASE
1153 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1154
1155+#ifdef CONFIG_PAX_ASLR
1156+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1157+
1158+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1159+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1160+#endif
1161+
1162 #include <asm/processor.h>
1163
1164 /*
1165diff -urNp linux-3.0.4/arch/mips/kernel/process.c linux-3.0.4/arch/mips/kernel/process.c
1166--- linux-3.0.4/arch/mips/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
1167+++ linux-3.0.4/arch/mips/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
1168@@ -473,15 +473,3 @@ unsigned long get_wchan(struct task_stru
1169 out:
1170 return pc;
1171 }
1172-
1173-/*
1174- * Don't forget that the stack pointer must be aligned on a 8 bytes
1175- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1176- */
1177-unsigned long arch_align_stack(unsigned long sp)
1178-{
1179- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1180- sp -= get_random_int() & ~PAGE_MASK;
1181-
1182- return sp & ALMASK;
1183-}
1184diff -urNp linux-3.0.4/arch/mips/mm/fault.c linux-3.0.4/arch/mips/mm/fault.c
1185--- linux-3.0.4/arch/mips/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1186+++ linux-3.0.4/arch/mips/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1187@@ -28,6 +28,23 @@
1188 #include <asm/highmem.h> /* For VMALLOC_END */
1189 #include <linux/kdebug.h>
1190
1191+#ifdef CONFIG_PAX_PAGEEXEC
1192+void pax_report_insns(void *pc, void *sp)
1193+{
1194+ unsigned long i;
1195+
1196+ printk(KERN_ERR "PAX: bytes at PC: ");
1197+ for (i = 0; i < 5; i++) {
1198+ unsigned int c;
1199+ if (get_user(c, (unsigned int *)pc+i))
1200+ printk(KERN_CONT "???????? ");
1201+ else
1202+ printk(KERN_CONT "%08x ", c);
1203+ }
1204+ printk("\n");
1205+}
1206+#endif
1207+
1208 /*
1209 * This routine handles page faults. It determines the address,
1210 * and the problem, and then passes it off to one of the appropriate
1211diff -urNp linux-3.0.4/arch/mips/mm/mmap.c linux-3.0.4/arch/mips/mm/mmap.c
1212--- linux-3.0.4/arch/mips/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
1213+++ linux-3.0.4/arch/mips/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
1214@@ -48,14 +48,18 @@ unsigned long arch_get_unmapped_area(str
1215 do_color_align = 0;
1216 if (filp || (flags & MAP_SHARED))
1217 do_color_align = 1;
1218+
1219+#ifdef CONFIG_PAX_RANDMMAP
1220+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1221+#endif
1222+
1223 if (addr) {
1224 if (do_color_align)
1225 addr = COLOUR_ALIGN(addr, pgoff);
1226 else
1227 addr = PAGE_ALIGN(addr);
1228 vmm = find_vma(current->mm, addr);
1229- if (TASK_SIZE - len >= addr &&
1230- (!vmm || addr + len <= vmm->vm_start))
1231+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1232 return addr;
1233 }
1234 addr = current->mm->mmap_base;
1235@@ -68,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
1236 /* At this point: (!vmm || addr < vmm->vm_end). */
1237 if (TASK_SIZE - len < addr)
1238 return -ENOMEM;
1239- if (!vmm || addr + len <= vmm->vm_start)
1240+ if (check_heap_stack_gap(vmm, addr, len))
1241 return addr;
1242 addr = vmm->vm_end;
1243 if (do_color_align)
1244@@ -93,30 +97,3 @@ void arch_pick_mmap_layout(struct mm_str
1245 mm->get_unmapped_area = arch_get_unmapped_area;
1246 mm->unmap_area = arch_unmap_area;
1247 }
1248-
1249-static inline unsigned long brk_rnd(void)
1250-{
1251- unsigned long rnd = get_random_int();
1252-
1253- rnd = rnd << PAGE_SHIFT;
1254- /* 8MB for 32bit, 256MB for 64bit */
1255- if (TASK_IS_32BIT_ADDR)
1256- rnd = rnd & 0x7ffffful;
1257- else
1258- rnd = rnd & 0xffffffful;
1259-
1260- return rnd;
1261-}
1262-
1263-unsigned long arch_randomize_brk(struct mm_struct *mm)
1264-{
1265- unsigned long base = mm->brk;
1266- unsigned long ret;
1267-
1268- ret = PAGE_ALIGN(base + brk_rnd());
1269-
1270- if (ret < mm->brk)
1271- return mm->brk;
1272-
1273- return ret;
1274-}
1275diff -urNp linux-3.0.4/arch/parisc/include/asm/elf.h linux-3.0.4/arch/parisc/include/asm/elf.h
1276--- linux-3.0.4/arch/parisc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1277+++ linux-3.0.4/arch/parisc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1278@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
1279
1280 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1281
1282+#ifdef CONFIG_PAX_ASLR
1283+#define PAX_ELF_ET_DYN_BASE 0x10000UL
1284+
1285+#define PAX_DELTA_MMAP_LEN 16
1286+#define PAX_DELTA_STACK_LEN 16
1287+#endif
1288+
1289 /* This yields a mask that user programs can use to figure out what
1290 instruction set this CPU supports. This could be done in user space,
1291 but it's not easy, and we've already done it here. */
1292diff -urNp linux-3.0.4/arch/parisc/include/asm/pgtable.h linux-3.0.4/arch/parisc/include/asm/pgtable.h
1293--- linux-3.0.4/arch/parisc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1294+++ linux-3.0.4/arch/parisc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1295@@ -210,6 +210,17 @@ struct vm_area_struct;
1296 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1297 #define PAGE_COPY PAGE_EXECREAD
1298 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1299+
1300+#ifdef CONFIG_PAX_PAGEEXEC
1301+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1302+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1303+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1304+#else
1305+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1306+# define PAGE_COPY_NOEXEC PAGE_COPY
1307+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1308+#endif
1309+
1310 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1311 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1312 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1313diff -urNp linux-3.0.4/arch/parisc/kernel/module.c linux-3.0.4/arch/parisc/kernel/module.c
1314--- linux-3.0.4/arch/parisc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
1315+++ linux-3.0.4/arch/parisc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
1316@@ -98,16 +98,38 @@
1317
1318 /* three functions to determine where in the module core
1319 * or init pieces the location is */
1320+static inline int in_init_rx(struct module *me, void *loc)
1321+{
1322+ return (loc >= me->module_init_rx &&
1323+ loc < (me->module_init_rx + me->init_size_rx));
1324+}
1325+
1326+static inline int in_init_rw(struct module *me, void *loc)
1327+{
1328+ return (loc >= me->module_init_rw &&
1329+ loc < (me->module_init_rw + me->init_size_rw));
1330+}
1331+
1332 static inline int in_init(struct module *me, void *loc)
1333 {
1334- return (loc >= me->module_init &&
1335- loc <= (me->module_init + me->init_size));
1336+ return in_init_rx(me, loc) || in_init_rw(me, loc);
1337+}
1338+
1339+static inline int in_core_rx(struct module *me, void *loc)
1340+{
1341+ return (loc >= me->module_core_rx &&
1342+ loc < (me->module_core_rx + me->core_size_rx));
1343+}
1344+
1345+static inline int in_core_rw(struct module *me, void *loc)
1346+{
1347+ return (loc >= me->module_core_rw &&
1348+ loc < (me->module_core_rw + me->core_size_rw));
1349 }
1350
1351 static inline int in_core(struct module *me, void *loc)
1352 {
1353- return (loc >= me->module_core &&
1354- loc <= (me->module_core + me->core_size));
1355+ return in_core_rx(me, loc) || in_core_rw(me, loc);
1356 }
1357
1358 static inline int in_local(struct module *me, void *loc)
1359@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_
1360 }
1361
1362 /* align things a bit */
1363- me->core_size = ALIGN(me->core_size, 16);
1364- me->arch.got_offset = me->core_size;
1365- me->core_size += gots * sizeof(struct got_entry);
1366-
1367- me->core_size = ALIGN(me->core_size, 16);
1368- me->arch.fdesc_offset = me->core_size;
1369- me->core_size += fdescs * sizeof(Elf_Fdesc);
1370+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1371+ me->arch.got_offset = me->core_size_rw;
1372+ me->core_size_rw += gots * sizeof(struct got_entry);
1373+
1374+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1375+ me->arch.fdesc_offset = me->core_size_rw;
1376+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1377
1378 me->arch.got_max = gots;
1379 me->arch.fdesc_max = fdescs;
1380@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module
1381
1382 BUG_ON(value == 0);
1383
1384- got = me->module_core + me->arch.got_offset;
1385+ got = me->module_core_rw + me->arch.got_offset;
1386 for (i = 0; got[i].addr; i++)
1387 if (got[i].addr == value)
1388 goto out;
1389@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module
1390 #ifdef CONFIG_64BIT
1391 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1392 {
1393- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1394+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1395
1396 if (!value) {
1397 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1398@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module
1399
1400 /* Create new one */
1401 fdesc->addr = value;
1402- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1403+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1404 return (Elf_Addr)fdesc;
1405 }
1406 #endif /* CONFIG_64BIT */
1407@@ -857,7 +879,7 @@ register_unwind_table(struct module *me,
1408
1409 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1410 end = table + sechdrs[me->arch.unwind_section].sh_size;
1411- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1412+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1413
1414 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1415 me->arch.unwind_section, table, end, gp);
1416diff -urNp linux-3.0.4/arch/parisc/kernel/sys_parisc.c linux-3.0.4/arch/parisc/kernel/sys_parisc.c
1417--- linux-3.0.4/arch/parisc/kernel/sys_parisc.c 2011-07-21 22:17:23.000000000 -0400
1418+++ linux-3.0.4/arch/parisc/kernel/sys_parisc.c 2011-08-23 21:47:55.000000000 -0400
1419@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1420 /* At this point: (!vma || addr < vma->vm_end). */
1421 if (TASK_SIZE - len < addr)
1422 return -ENOMEM;
1423- if (!vma || addr + len <= vma->vm_start)
1424+ if (check_heap_stack_gap(vma, addr, len))
1425 return addr;
1426 addr = vma->vm_end;
1427 }
1428@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1429 /* At this point: (!vma || addr < vma->vm_end). */
1430 if (TASK_SIZE - len < addr)
1431 return -ENOMEM;
1432- if (!vma || addr + len <= vma->vm_start)
1433+ if (check_heap_stack_gap(vma, addr, len))
1434 return addr;
1435 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1436 if (addr < vma->vm_end) /* handle wraparound */
1437@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1438 if (flags & MAP_FIXED)
1439 return addr;
1440 if (!addr)
1441- addr = TASK_UNMAPPED_BASE;
1442+ addr = current->mm->mmap_base;
1443
1444 if (filp) {
1445 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1446diff -urNp linux-3.0.4/arch/parisc/kernel/traps.c linux-3.0.4/arch/parisc/kernel/traps.c
1447--- linux-3.0.4/arch/parisc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
1448+++ linux-3.0.4/arch/parisc/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
1449@@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1450
1451 down_read(&current->mm->mmap_sem);
1452 vma = find_vma(current->mm,regs->iaoq[0]);
1453- if (vma && (regs->iaoq[0] >= vma->vm_start)
1454- && (vma->vm_flags & VM_EXEC)) {
1455-
1456+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1457 fault_address = regs->iaoq[0];
1458 fault_space = regs->iasq[0];
1459
1460diff -urNp linux-3.0.4/arch/parisc/mm/fault.c linux-3.0.4/arch/parisc/mm/fault.c
1461--- linux-3.0.4/arch/parisc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1462+++ linux-3.0.4/arch/parisc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1463@@ -15,6 +15,7 @@
1464 #include <linux/sched.h>
1465 #include <linux/interrupt.h>
1466 #include <linux/module.h>
1467+#include <linux/unistd.h>
1468
1469 #include <asm/uaccess.h>
1470 #include <asm/traps.h>
1471@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1472 static unsigned long
1473 parisc_acctyp(unsigned long code, unsigned int inst)
1474 {
1475- if (code == 6 || code == 16)
1476+ if (code == 6 || code == 7 || code == 16)
1477 return VM_EXEC;
1478
1479 switch (inst & 0xf0000000) {
1480@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1481 }
1482 #endif
1483
1484+#ifdef CONFIG_PAX_PAGEEXEC
1485+/*
1486+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1487+ *
1488+ * returns 1 when task should be killed
1489+ * 2 when rt_sigreturn trampoline was detected
1490+ * 3 when unpatched PLT trampoline was detected
1491+ */
1492+static int pax_handle_fetch_fault(struct pt_regs *regs)
1493+{
1494+
1495+#ifdef CONFIG_PAX_EMUPLT
1496+ int err;
1497+
1498+ do { /* PaX: unpatched PLT emulation */
1499+ unsigned int bl, depwi;
1500+
1501+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1502+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1503+
1504+ if (err)
1505+ break;
1506+
1507+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1508+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1509+
1510+ err = get_user(ldw, (unsigned int *)addr);
1511+ err |= get_user(bv, (unsigned int *)(addr+4));
1512+ err |= get_user(ldw2, (unsigned int *)(addr+8));
1513+
1514+ if (err)
1515+ break;
1516+
1517+ if (ldw == 0x0E801096U &&
1518+ bv == 0xEAC0C000U &&
1519+ ldw2 == 0x0E881095U)
1520+ {
1521+ unsigned int resolver, map;
1522+
1523+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1524+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1525+ if (err)
1526+ break;
1527+
1528+ regs->gr[20] = instruction_pointer(regs)+8;
1529+ regs->gr[21] = map;
1530+ regs->gr[22] = resolver;
1531+ regs->iaoq[0] = resolver | 3UL;
1532+ regs->iaoq[1] = regs->iaoq[0] + 4;
1533+ return 3;
1534+ }
1535+ }
1536+ } while (0);
1537+#endif
1538+
1539+#ifdef CONFIG_PAX_EMUTRAMP
1540+
1541+#ifndef CONFIG_PAX_EMUSIGRT
1542+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1543+ return 1;
1544+#endif
1545+
1546+ do { /* PaX: rt_sigreturn emulation */
1547+ unsigned int ldi1, ldi2, bel, nop;
1548+
1549+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1550+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1551+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1552+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1553+
1554+ if (err)
1555+ break;
1556+
1557+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1558+ ldi2 == 0x3414015AU &&
1559+ bel == 0xE4008200U &&
1560+ nop == 0x08000240U)
1561+ {
1562+ regs->gr[25] = (ldi1 & 2) >> 1;
1563+ regs->gr[20] = __NR_rt_sigreturn;
1564+ regs->gr[31] = regs->iaoq[1] + 16;
1565+ regs->sr[0] = regs->iasq[1];
1566+ regs->iaoq[0] = 0x100UL;
1567+ regs->iaoq[1] = regs->iaoq[0] + 4;
1568+ regs->iasq[0] = regs->sr[2];
1569+ regs->iasq[1] = regs->sr[2];
1570+ return 2;
1571+ }
1572+ } while (0);
1573+#endif
1574+
1575+ return 1;
1576+}
1577+
1578+void pax_report_insns(void *pc, void *sp)
1579+{
1580+ unsigned long i;
1581+
1582+ printk(KERN_ERR "PAX: bytes at PC: ");
1583+ for (i = 0; i < 5; i++) {
1584+ unsigned int c;
1585+ if (get_user(c, (unsigned int *)pc+i))
1586+ printk(KERN_CONT "???????? ");
1587+ else
1588+ printk(KERN_CONT "%08x ", c);
1589+ }
1590+ printk("\n");
1591+}
1592+#endif
1593+
1594 int fixup_exception(struct pt_regs *regs)
1595 {
1596 const struct exception_table_entry *fix;
1597@@ -192,8 +303,33 @@ good_area:
1598
1599 acc_type = parisc_acctyp(code,regs->iir);
1600
1601- if ((vma->vm_flags & acc_type) != acc_type)
1602+ if ((vma->vm_flags & acc_type) != acc_type) {
1603+
1604+#ifdef CONFIG_PAX_PAGEEXEC
1605+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1606+ (address & ~3UL) == instruction_pointer(regs))
1607+ {
1608+ up_read(&mm->mmap_sem);
1609+ switch (pax_handle_fetch_fault(regs)) {
1610+
1611+#ifdef CONFIG_PAX_EMUPLT
1612+ case 3:
1613+ return;
1614+#endif
1615+
1616+#ifdef CONFIG_PAX_EMUTRAMP
1617+ case 2:
1618+ return;
1619+#endif
1620+
1621+ }
1622+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1623+ do_group_exit(SIGKILL);
1624+ }
1625+#endif
1626+
1627 goto bad_area;
1628+ }
1629
1630 /*
1631 * If for any reason at all we couldn't handle the fault, make
1632diff -urNp linux-3.0.4/arch/powerpc/include/asm/elf.h linux-3.0.4/arch/powerpc/include/asm/elf.h
1633--- linux-3.0.4/arch/powerpc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1634+++ linux-3.0.4/arch/powerpc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1635@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1636 the loader. We need to make sure that it is out of the way of the program
1637 that it will "exec", and that there is sufficient room for the brk. */
1638
1639-extern unsigned long randomize_et_dyn(unsigned long base);
1640-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1641+#define ELF_ET_DYN_BASE (0x20000000)
1642+
1643+#ifdef CONFIG_PAX_ASLR
1644+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1645+
1646+#ifdef __powerpc64__
1647+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
1648+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
1649+#else
1650+#define PAX_DELTA_MMAP_LEN 15
1651+#define PAX_DELTA_STACK_LEN 15
1652+#endif
1653+#endif
1654
1655 /*
1656 * Our registers are always unsigned longs, whether we're a 32 bit
1657@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
1658 (0x7ff >> (PAGE_SHIFT - 12)) : \
1659 (0x3ffff >> (PAGE_SHIFT - 12)))
1660
1661-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1662-#define arch_randomize_brk arch_randomize_brk
1663-
1664 #endif /* __KERNEL__ */
1665
1666 /*
1667diff -urNp linux-3.0.4/arch/powerpc/include/asm/kmap_types.h linux-3.0.4/arch/powerpc/include/asm/kmap_types.h
1668--- linux-3.0.4/arch/powerpc/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
1669+++ linux-3.0.4/arch/powerpc/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
1670@@ -27,6 +27,7 @@ enum km_type {
1671 KM_PPC_SYNC_PAGE,
1672 KM_PPC_SYNC_ICACHE,
1673 KM_KDB,
1674+ KM_CLEARPAGE,
1675 KM_TYPE_NR
1676 };
1677
1678diff -urNp linux-3.0.4/arch/powerpc/include/asm/mman.h linux-3.0.4/arch/powerpc/include/asm/mman.h
1679--- linux-3.0.4/arch/powerpc/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
1680+++ linux-3.0.4/arch/powerpc/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
1681@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm
1682 }
1683 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
1684
1685-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
1686+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
1687 {
1688 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
1689 }
1690diff -urNp linux-3.0.4/arch/powerpc/include/asm/page_64.h linux-3.0.4/arch/powerpc/include/asm/page_64.h
1691--- linux-3.0.4/arch/powerpc/include/asm/page_64.h 2011-07-21 22:17:23.000000000 -0400
1692+++ linux-3.0.4/arch/powerpc/include/asm/page_64.h 2011-08-23 21:47:55.000000000 -0400
1693@@ -155,15 +155,18 @@ do { \
1694 * stack by default, so in the absence of a PT_GNU_STACK program header
1695 * we turn execute permission off.
1696 */
1697-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1698- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1699+#define VM_STACK_DEFAULT_FLAGS32 \
1700+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1701+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1702
1703 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1704 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1705
1706+#ifndef CONFIG_PAX_PAGEEXEC
1707 #define VM_STACK_DEFAULT_FLAGS \
1708 (is_32bit_task() ? \
1709 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
1710+#endif
1711
1712 #include <asm-generic/getorder.h>
1713
1714diff -urNp linux-3.0.4/arch/powerpc/include/asm/page.h linux-3.0.4/arch/powerpc/include/asm/page.h
1715--- linux-3.0.4/arch/powerpc/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1716+++ linux-3.0.4/arch/powerpc/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1717@@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
1718 * and needs to be executable. This means the whole heap ends
1719 * up being executable.
1720 */
1721-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1722- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1723+#define VM_DATA_DEFAULT_FLAGS32 \
1724+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1725+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1726
1727 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1728 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1729@@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
1730 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
1731 #endif
1732
1733+#define ktla_ktva(addr) (addr)
1734+#define ktva_ktla(addr) (addr)
1735+
1736 #ifndef __ASSEMBLY__
1737
1738 #undef STRICT_MM_TYPECHECKS
1739diff -urNp linux-3.0.4/arch/powerpc/include/asm/pgtable.h linux-3.0.4/arch/powerpc/include/asm/pgtable.h
1740--- linux-3.0.4/arch/powerpc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1741+++ linux-3.0.4/arch/powerpc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1742@@ -2,6 +2,7 @@
1743 #define _ASM_POWERPC_PGTABLE_H
1744 #ifdef __KERNEL__
1745
1746+#include <linux/const.h>
1747 #ifndef __ASSEMBLY__
1748 #include <asm/processor.h> /* For TASK_SIZE */
1749 #include <asm/mmu.h>
1750diff -urNp linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h
1751--- linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h 2011-07-21 22:17:23.000000000 -0400
1752+++ linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h 2011-08-23 21:47:55.000000000 -0400
1753@@ -21,6 +21,7 @@
1754 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
1755 #define _PAGE_USER 0x004 /* usermode access allowed */
1756 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
1757+#define _PAGE_EXEC _PAGE_GUARDED
1758 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
1759 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
1760 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
1761diff -urNp linux-3.0.4/arch/powerpc/include/asm/reg.h linux-3.0.4/arch/powerpc/include/asm/reg.h
1762--- linux-3.0.4/arch/powerpc/include/asm/reg.h 2011-07-21 22:17:23.000000000 -0400
1763+++ linux-3.0.4/arch/powerpc/include/asm/reg.h 2011-08-23 21:47:55.000000000 -0400
1764@@ -209,6 +209,7 @@
1765 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
1766 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
1767 #define DSISR_NOHPTE 0x40000000 /* no translation found */
1768+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
1769 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
1770 #define DSISR_ISSTORE 0x02000000 /* access was a store */
1771 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
1772diff -urNp linux-3.0.4/arch/powerpc/include/asm/system.h linux-3.0.4/arch/powerpc/include/asm/system.h
1773--- linux-3.0.4/arch/powerpc/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1774+++ linux-3.0.4/arch/powerpc/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1775@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
1776 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
1777 #endif
1778
1779-extern unsigned long arch_align_stack(unsigned long sp);
1780+#define arch_align_stack(x) ((x) & ~0xfUL)
1781
1782 /* Used in very early kernel initialization. */
1783 extern unsigned long reloc_offset(void);
1784diff -urNp linux-3.0.4/arch/powerpc/include/asm/uaccess.h linux-3.0.4/arch/powerpc/include/asm/uaccess.h
1785--- linux-3.0.4/arch/powerpc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
1786+++ linux-3.0.4/arch/powerpc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
1787@@ -13,6 +13,8 @@
1788 #define VERIFY_READ 0
1789 #define VERIFY_WRITE 1
1790
1791+extern void check_object_size(const void *ptr, unsigned long n, bool to);
1792+
1793 /*
1794 * The fs value determines whether argument validity checking should be
1795 * performed or not. If get_fs() == USER_DS, checking is performed, with
1796@@ -327,52 +329,6 @@ do { \
1797 extern unsigned long __copy_tofrom_user(void __user *to,
1798 const void __user *from, unsigned long size);
1799
1800-#ifndef __powerpc64__
1801-
1802-static inline unsigned long copy_from_user(void *to,
1803- const void __user *from, unsigned long n)
1804-{
1805- unsigned long over;
1806-
1807- if (access_ok(VERIFY_READ, from, n))
1808- return __copy_tofrom_user((__force void __user *)to, from, n);
1809- if ((unsigned long)from < TASK_SIZE) {
1810- over = (unsigned long)from + n - TASK_SIZE;
1811- return __copy_tofrom_user((__force void __user *)to, from,
1812- n - over) + over;
1813- }
1814- return n;
1815-}
1816-
1817-static inline unsigned long copy_to_user(void __user *to,
1818- const void *from, unsigned long n)
1819-{
1820- unsigned long over;
1821-
1822- if (access_ok(VERIFY_WRITE, to, n))
1823- return __copy_tofrom_user(to, (__force void __user *)from, n);
1824- if ((unsigned long)to < TASK_SIZE) {
1825- over = (unsigned long)to + n - TASK_SIZE;
1826- return __copy_tofrom_user(to, (__force void __user *)from,
1827- n - over) + over;
1828- }
1829- return n;
1830-}
1831-
1832-#else /* __powerpc64__ */
1833-
1834-#define __copy_in_user(to, from, size) \
1835- __copy_tofrom_user((to), (from), (size))
1836-
1837-extern unsigned long copy_from_user(void *to, const void __user *from,
1838- unsigned long n);
1839-extern unsigned long copy_to_user(void __user *to, const void *from,
1840- unsigned long n);
1841-extern unsigned long copy_in_user(void __user *to, const void __user *from,
1842- unsigned long n);
1843-
1844-#endif /* __powerpc64__ */
1845-
1846 static inline unsigned long __copy_from_user_inatomic(void *to,
1847 const void __user *from, unsigned long n)
1848 {
1849@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
1850 if (ret == 0)
1851 return 0;
1852 }
1853+
1854+ if (!__builtin_constant_p(n))
1855+ check_object_size(to, n, false);
1856+
1857 return __copy_tofrom_user((__force void __user *)to, from, n);
1858 }
1859
1860@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
1861 if (ret == 0)
1862 return 0;
1863 }
1864+
1865+ if (!__builtin_constant_p(n))
1866+ check_object_size(from, n, true);
1867+
1868 return __copy_tofrom_user(to, (__force const void __user *)from, n);
1869 }
1870
1871@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
1872 return __copy_to_user_inatomic(to, from, size);
1873 }
1874
1875+#ifndef __powerpc64__
1876+
1877+static inline unsigned long __must_check copy_from_user(void *to,
1878+ const void __user *from, unsigned long n)
1879+{
1880+ unsigned long over;
1881+
1882+ if ((long)n < 0)
1883+ return n;
1884+
1885+ if (access_ok(VERIFY_READ, from, n)) {
1886+ if (!__builtin_constant_p(n))
1887+ check_object_size(to, n, false);
1888+ return __copy_tofrom_user((__force void __user *)to, from, n);
1889+ }
1890+ if ((unsigned long)from < TASK_SIZE) {
1891+ over = (unsigned long)from + n - TASK_SIZE;
1892+ if (!__builtin_constant_p(n - over))
1893+ check_object_size(to, n - over, false);
1894+ return __copy_tofrom_user((__force void __user *)to, from,
1895+ n - over) + over;
1896+ }
1897+ return n;
1898+}
1899+
1900+static inline unsigned long __must_check copy_to_user(void __user *to,
1901+ const void *from, unsigned long n)
1902+{
1903+ unsigned long over;
1904+
1905+ if ((long)n < 0)
1906+ return n;
1907+
1908+ if (access_ok(VERIFY_WRITE, to, n)) {
1909+ if (!__builtin_constant_p(n))
1910+ check_object_size(from, n, true);
1911+ return __copy_tofrom_user(to, (__force void __user *)from, n);
1912+ }
1913+ if ((unsigned long)to < TASK_SIZE) {
1914+ over = (unsigned long)to + n - TASK_SIZE;
1915+ if (!__builtin_constant_p(n))
1916+ check_object_size(from, n - over, true);
1917+ return __copy_tofrom_user(to, (__force void __user *)from,
1918+ n - over) + over;
1919+ }
1920+ return n;
1921+}
1922+
1923+#else /* __powerpc64__ */
1924+
1925+#define __copy_in_user(to, from, size) \
1926+ __copy_tofrom_user((to), (from), (size))
1927+
1928+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1929+{
1930+ if ((long)n < 0 || n > INT_MAX)
1931+ return n;
1932+
1933+ if (!__builtin_constant_p(n))
1934+ check_object_size(to, n, false);
1935+
1936+ if (likely(access_ok(VERIFY_READ, from, n)))
1937+ n = __copy_from_user(to, from, n);
1938+ else
1939+ memset(to, 0, n);
1940+ return n;
1941+}
1942+
1943+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1944+{
1945+ if ((long)n < 0 || n > INT_MAX)
1946+ return n;
1947+
1948+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
1949+ if (!__builtin_constant_p(n))
1950+ check_object_size(from, n, true);
1951+ n = __copy_to_user(to, from, n);
1952+ }
1953+ return n;
1954+}
1955+
1956+extern unsigned long copy_in_user(void __user *to, const void __user *from,
1957+ unsigned long n);
1958+
1959+#endif /* __powerpc64__ */
1960+
1961 extern unsigned long __clear_user(void __user *addr, unsigned long size);
1962
1963 static inline unsigned long clear_user(void __user *addr, unsigned long size)
1964diff -urNp linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S
1965--- linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S 2011-07-21 22:17:23.000000000 -0400
1966+++ linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S 2011-08-23 21:47:55.000000000 -0400
1967@@ -567,6 +567,7 @@ storage_fault_common:
1968 std r14,_DAR(r1)
1969 std r15,_DSISR(r1)
1970 addi r3,r1,STACK_FRAME_OVERHEAD
1971+ bl .save_nvgprs
1972 mr r4,r14
1973 mr r5,r15
1974 ld r14,PACA_EXGEN+EX_R14(r13)
1975@@ -576,8 +577,7 @@ storage_fault_common:
1976 cmpdi r3,0
1977 bne- 1f
1978 b .ret_from_except_lite
1979-1: bl .save_nvgprs
1980- mr r5,r3
1981+1: mr r5,r3
1982 addi r3,r1,STACK_FRAME_OVERHEAD
1983 ld r4,_DAR(r1)
1984 bl .bad_page_fault
1985diff -urNp linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S
1986--- linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S 2011-07-21 22:17:23.000000000 -0400
1987+++ linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S 2011-08-23 21:47:55.000000000 -0400
1988@@ -956,10 +956,10 @@ handle_page_fault:
1989 11: ld r4,_DAR(r1)
1990 ld r5,_DSISR(r1)
1991 addi r3,r1,STACK_FRAME_OVERHEAD
1992+ bl .save_nvgprs
1993 bl .do_page_fault
1994 cmpdi r3,0
1995 beq+ 13f
1996- bl .save_nvgprs
1997 mr r5,r3
1998 addi r3,r1,STACK_FRAME_OVERHEAD
1999 lwz r4,_DAR(r1)
2000diff -urNp linux-3.0.4/arch/powerpc/kernel/module_32.c linux-3.0.4/arch/powerpc/kernel/module_32.c
2001--- linux-3.0.4/arch/powerpc/kernel/module_32.c 2011-07-21 22:17:23.000000000 -0400
2002+++ linux-3.0.4/arch/powerpc/kernel/module_32.c 2011-08-23 21:47:55.000000000 -0400
2003@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2004 me->arch.core_plt_section = i;
2005 }
2006 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2007- printk("Module doesn't contain .plt or .init.plt sections.\n");
2008+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2009 return -ENOEXEC;
2010 }
2011
2012@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2013
2014 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2015 /* Init, or core PLT? */
2016- if (location >= mod->module_core
2017- && location < mod->module_core + mod->core_size)
2018+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2019+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2020 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2021- else
2022+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2023+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2024 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2025+ else {
2026+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2027+ return ~0UL;
2028+ }
2029
2030 /* Find this entry, or if that fails, the next avail. entry */
2031 while (entry->jump[0]) {
2032diff -urNp linux-3.0.4/arch/powerpc/kernel/module.c linux-3.0.4/arch/powerpc/kernel/module.c
2033--- linux-3.0.4/arch/powerpc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2034+++ linux-3.0.4/arch/powerpc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2035@@ -31,11 +31,24 @@
2036
2037 LIST_HEAD(module_bug_list);
2038
2039+#ifdef CONFIG_PAX_KERNEXEC
2040 void *module_alloc(unsigned long size)
2041 {
2042 if (size == 0)
2043 return NULL;
2044
2045+ return vmalloc(size);
2046+}
2047+
2048+void *module_alloc_exec(unsigned long size)
2049+#else
2050+void *module_alloc(unsigned long size)
2051+#endif
2052+
2053+{
2054+ if (size == 0)
2055+ return NULL;
2056+
2057 return vmalloc_exec(size);
2058 }
2059
2060@@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2061 vfree(module_region);
2062 }
2063
2064+#ifdef CONFIG_PAX_KERNEXEC
2065+void module_free_exec(struct module *mod, void *module_region)
2066+{
2067+ module_free(mod, module_region);
2068+}
2069+#endif
2070+
2071 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2072 const Elf_Shdr *sechdrs,
2073 const char *name)
2074diff -urNp linux-3.0.4/arch/powerpc/kernel/process.c linux-3.0.4/arch/powerpc/kernel/process.c
2075--- linux-3.0.4/arch/powerpc/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2076+++ linux-3.0.4/arch/powerpc/kernel/process.c 2011-08-23 21:48:14.000000000 -0400
2077@@ -676,8 +676,8 @@ void show_regs(struct pt_regs * regs)
2078 * Lookup NIP late so we have the best change of getting the
2079 * above info out without failing
2080 */
2081- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2082- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2083+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2084+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2085 #endif
2086 show_stack(current, (unsigned long *) regs->gpr[1]);
2087 if (!user_mode(regs))
2088@@ -1183,10 +1183,10 @@ void show_stack(struct task_struct *tsk,
2089 newsp = stack[0];
2090 ip = stack[STACK_FRAME_LR_SAVE];
2091 if (!firstframe || ip != lr) {
2092- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2093+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2094 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2095 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2096- printk(" (%pS)",
2097+ printk(" (%pA)",
2098 (void *)current->ret_stack[curr_frame].ret);
2099 curr_frame--;
2100 }
2101@@ -1206,7 +1206,7 @@ void show_stack(struct task_struct *tsk,
2102 struct pt_regs *regs = (struct pt_regs *)
2103 (sp + STACK_FRAME_OVERHEAD);
2104 lr = regs->link;
2105- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2106+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2107 regs->trap, (void *)regs->nip, (void *)lr);
2108 firstframe = 1;
2109 }
2110@@ -1281,58 +1281,3 @@ void thread_info_cache_init(void)
2111 }
2112
2113 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2114-
2115-unsigned long arch_align_stack(unsigned long sp)
2116-{
2117- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2118- sp -= get_random_int() & ~PAGE_MASK;
2119- return sp & ~0xf;
2120-}
2121-
2122-static inline unsigned long brk_rnd(void)
2123-{
2124- unsigned long rnd = 0;
2125-
2126- /* 8MB for 32bit, 1GB for 64bit */
2127- if (is_32bit_task())
2128- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2129- else
2130- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2131-
2132- return rnd << PAGE_SHIFT;
2133-}
2134-
2135-unsigned long arch_randomize_brk(struct mm_struct *mm)
2136-{
2137- unsigned long base = mm->brk;
2138- unsigned long ret;
2139-
2140-#ifdef CONFIG_PPC_STD_MMU_64
2141- /*
2142- * If we are using 1TB segments and we are allowed to randomise
2143- * the heap, we can put it above 1TB so it is backed by a 1TB
2144- * segment. Otherwise the heap will be in the bottom 1TB
2145- * which always uses 256MB segments and this may result in a
2146- * performance penalty.
2147- */
2148- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2149- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2150-#endif
2151-
2152- ret = PAGE_ALIGN(base + brk_rnd());
2153-
2154- if (ret < mm->brk)
2155- return mm->brk;
2156-
2157- return ret;
2158-}
2159-
2160-unsigned long randomize_et_dyn(unsigned long base)
2161-{
2162- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2163-
2164- if (ret < base)
2165- return base;
2166-
2167- return ret;
2168-}
2169diff -urNp linux-3.0.4/arch/powerpc/kernel/signal_32.c linux-3.0.4/arch/powerpc/kernel/signal_32.c
2170--- linux-3.0.4/arch/powerpc/kernel/signal_32.c 2011-07-21 22:17:23.000000000 -0400
2171+++ linux-3.0.4/arch/powerpc/kernel/signal_32.c 2011-08-23 21:47:55.000000000 -0400
2172@@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig
2173 /* Save user registers on the stack */
2174 frame = &rt_sf->uc.uc_mcontext;
2175 addr = frame;
2176- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2177+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2178 if (save_user_regs(regs, frame, 0, 1))
2179 goto badframe;
2180 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2181diff -urNp linux-3.0.4/arch/powerpc/kernel/signal_64.c linux-3.0.4/arch/powerpc/kernel/signal_64.c
2182--- linux-3.0.4/arch/powerpc/kernel/signal_64.c 2011-07-21 22:17:23.000000000 -0400
2183+++ linux-3.0.4/arch/powerpc/kernel/signal_64.c 2011-08-23 21:47:55.000000000 -0400
2184@@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct
2185 current->thread.fpscr.val = 0;
2186
2187 /* Set up to return from userspace. */
2188- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2189+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2190 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2191 } else {
2192 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2193diff -urNp linux-3.0.4/arch/powerpc/kernel/traps.c linux-3.0.4/arch/powerpc/kernel/traps.c
2194--- linux-3.0.4/arch/powerpc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
2195+++ linux-3.0.4/arch/powerpc/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
2196@@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2197 static inline void pmac_backlight_unblank(void) { }
2198 #endif
2199
2200+extern void gr_handle_kernel_exploit(void);
2201+
2202 int die(const char *str, struct pt_regs *regs, long err)
2203 {
2204 static struct {
2205@@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs
2206 if (panic_on_oops)
2207 panic("Fatal exception");
2208
2209+ gr_handle_kernel_exploit();
2210+
2211 oops_exit();
2212 do_exit(err);
2213
2214diff -urNp linux-3.0.4/arch/powerpc/kernel/vdso.c linux-3.0.4/arch/powerpc/kernel/vdso.c
2215--- linux-3.0.4/arch/powerpc/kernel/vdso.c 2011-07-21 22:17:23.000000000 -0400
2216+++ linux-3.0.4/arch/powerpc/kernel/vdso.c 2011-08-23 21:47:55.000000000 -0400
2217@@ -36,6 +36,7 @@
2218 #include <asm/firmware.h>
2219 #include <asm/vdso.h>
2220 #include <asm/vdso_datapage.h>
2221+#include <asm/mman.h>
2222
2223 #include "setup.h"
2224
2225@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2226 vdso_base = VDSO32_MBASE;
2227 #endif
2228
2229- current->mm->context.vdso_base = 0;
2230+ current->mm->context.vdso_base = ~0UL;
2231
2232 /* vDSO has a problem and was disabled, just don't "enable" it for the
2233 * process
2234@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2235 vdso_base = get_unmapped_area(NULL, vdso_base,
2236 (vdso_pages << PAGE_SHIFT) +
2237 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2238- 0, 0);
2239+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2240 if (IS_ERR_VALUE(vdso_base)) {
2241 rc = vdso_base;
2242 goto fail_mmapsem;
2243diff -urNp linux-3.0.4/arch/powerpc/lib/usercopy_64.c linux-3.0.4/arch/powerpc/lib/usercopy_64.c
2244--- linux-3.0.4/arch/powerpc/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
2245+++ linux-3.0.4/arch/powerpc/lib/usercopy_64.c 2011-08-23 21:47:55.000000000 -0400
2246@@ -9,22 +9,6 @@
2247 #include <linux/module.h>
2248 #include <asm/uaccess.h>
2249
2250-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2251-{
2252- if (likely(access_ok(VERIFY_READ, from, n)))
2253- n = __copy_from_user(to, from, n);
2254- else
2255- memset(to, 0, n);
2256- return n;
2257-}
2258-
2259-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2260-{
2261- if (likely(access_ok(VERIFY_WRITE, to, n)))
2262- n = __copy_to_user(to, from, n);
2263- return n;
2264-}
2265-
2266 unsigned long copy_in_user(void __user *to, const void __user *from,
2267 unsigned long n)
2268 {
2269@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2270 return n;
2271 }
2272
2273-EXPORT_SYMBOL(copy_from_user);
2274-EXPORT_SYMBOL(copy_to_user);
2275 EXPORT_SYMBOL(copy_in_user);
2276
2277diff -urNp linux-3.0.4/arch/powerpc/mm/fault.c linux-3.0.4/arch/powerpc/mm/fault.c
2278--- linux-3.0.4/arch/powerpc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
2279+++ linux-3.0.4/arch/powerpc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
2280@@ -32,6 +32,10 @@
2281 #include <linux/perf_event.h>
2282 #include <linux/magic.h>
2283 #include <linux/ratelimit.h>
2284+#include <linux/slab.h>
2285+#include <linux/pagemap.h>
2286+#include <linux/compiler.h>
2287+#include <linux/unistd.h>
2288
2289 #include <asm/firmware.h>
2290 #include <asm/page.h>
2291@@ -43,6 +47,7 @@
2292 #include <asm/tlbflush.h>
2293 #include <asm/siginfo.h>
2294 #include <mm/mmu_decl.h>
2295+#include <asm/ptrace.h>
2296
2297 #ifdef CONFIG_KPROBES
2298 static inline int notify_page_fault(struct pt_regs *regs)
2299@@ -66,6 +71,33 @@ static inline int notify_page_fault(stru
2300 }
2301 #endif
2302
2303+#ifdef CONFIG_PAX_PAGEEXEC
2304+/*
2305+ * PaX: decide what to do with offenders (regs->nip = fault address)
2306+ *
2307+ * returns 1 when task should be killed
2308+ */
2309+static int pax_handle_fetch_fault(struct pt_regs *regs)
2310+{
2311+ return 1;
2312+}
2313+
2314+void pax_report_insns(void *pc, void *sp)
2315+{
2316+ unsigned long i;
2317+
2318+ printk(KERN_ERR "PAX: bytes at PC: ");
2319+ for (i = 0; i < 5; i++) {
2320+ unsigned int c;
2321+ if (get_user(c, (unsigned int __user *)pc+i))
2322+ printk(KERN_CONT "???????? ");
2323+ else
2324+ printk(KERN_CONT "%08x ", c);
2325+ }
2326+ printk("\n");
2327+}
2328+#endif
2329+
2330 /*
2331 * Check whether the instruction at regs->nip is a store using
2332 * an update addressing form which will update r1.
2333@@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_re
2334 * indicate errors in DSISR but can validly be set in SRR1.
2335 */
2336 if (trap == 0x400)
2337- error_code &= 0x48200000;
2338+ error_code &= 0x58200000;
2339 else
2340 is_write = error_code & DSISR_ISSTORE;
2341 #else
2342@@ -259,7 +291,7 @@ good_area:
2343 * "undefined". Of those that can be set, this is the only
2344 * one which seems bad.
2345 */
2346- if (error_code & 0x10000000)
2347+ if (error_code & DSISR_GUARDED)
2348 /* Guarded storage error. */
2349 goto bad_area;
2350 #endif /* CONFIG_8xx */
2351@@ -274,7 +306,7 @@ good_area:
2352 * processors use the same I/D cache coherency mechanism
2353 * as embedded.
2354 */
2355- if (error_code & DSISR_PROTFAULT)
2356+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2357 goto bad_area;
2358 #endif /* CONFIG_PPC_STD_MMU */
2359
2360@@ -343,6 +375,23 @@ bad_area:
2361 bad_area_nosemaphore:
2362 /* User mode accesses cause a SIGSEGV */
2363 if (user_mode(regs)) {
2364+
2365+#ifdef CONFIG_PAX_PAGEEXEC
2366+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2367+#ifdef CONFIG_PPC_STD_MMU
2368+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2369+#else
2370+ if (is_exec && regs->nip == address) {
2371+#endif
2372+ switch (pax_handle_fetch_fault(regs)) {
2373+ }
2374+
2375+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2376+ do_group_exit(SIGKILL);
2377+ }
2378+ }
2379+#endif
2380+
2381 _exception(SIGSEGV, regs, code, address);
2382 return 0;
2383 }
2384diff -urNp linux-3.0.4/arch/powerpc/mm/mmap_64.c linux-3.0.4/arch/powerpc/mm/mmap_64.c
2385--- linux-3.0.4/arch/powerpc/mm/mmap_64.c 2011-07-21 22:17:23.000000000 -0400
2386+++ linux-3.0.4/arch/powerpc/mm/mmap_64.c 2011-08-23 21:47:55.000000000 -0400
2387@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2388 */
2389 if (mmap_is_legacy()) {
2390 mm->mmap_base = TASK_UNMAPPED_BASE;
2391+
2392+#ifdef CONFIG_PAX_RANDMMAP
2393+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2394+ mm->mmap_base += mm->delta_mmap;
2395+#endif
2396+
2397 mm->get_unmapped_area = arch_get_unmapped_area;
2398 mm->unmap_area = arch_unmap_area;
2399 } else {
2400 mm->mmap_base = mmap_base();
2401+
2402+#ifdef CONFIG_PAX_RANDMMAP
2403+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2404+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2405+#endif
2406+
2407 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2408 mm->unmap_area = arch_unmap_area_topdown;
2409 }
2410diff -urNp linux-3.0.4/arch/powerpc/mm/slice.c linux-3.0.4/arch/powerpc/mm/slice.c
2411--- linux-3.0.4/arch/powerpc/mm/slice.c 2011-07-21 22:17:23.000000000 -0400
2412+++ linux-3.0.4/arch/powerpc/mm/slice.c 2011-08-23 21:47:55.000000000 -0400
2413@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2414 if ((mm->task_size - len) < addr)
2415 return 0;
2416 vma = find_vma(mm, addr);
2417- return (!vma || (addr + len) <= vma->vm_start);
2418+ return check_heap_stack_gap(vma, addr, len);
2419 }
2420
2421 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2422@@ -256,7 +256,7 @@ full_search:
2423 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2424 continue;
2425 }
2426- if (!vma || addr + len <= vma->vm_start) {
2427+ if (check_heap_stack_gap(vma, addr, len)) {
2428 /*
2429 * Remember the place where we stopped the search:
2430 */
2431@@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2432 }
2433 }
2434
2435- addr = mm->mmap_base;
2436- while (addr > len) {
2437+ if (mm->mmap_base < len)
2438+ addr = -ENOMEM;
2439+ else
2440+ addr = mm->mmap_base - len;
2441+
2442+ while (!IS_ERR_VALUE(addr)) {
2443 /* Go down by chunk size */
2444- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2445+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
2446
2447 /* Check for hit with different page size */
2448 mask = slice_range_to_mask(addr, len);
2449@@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2450 * return with success:
2451 */
2452 vma = find_vma(mm, addr);
2453- if (!vma || (addr + len) <= vma->vm_start) {
2454+ if (check_heap_stack_gap(vma, addr, len)) {
2455 /* remember the address as a hint for next time */
2456 if (use_cache)
2457 mm->free_area_cache = addr;
2458@@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2459 mm->cached_hole_size = vma->vm_start - addr;
2460
2461 /* try just below the current vma->vm_start */
2462- addr = vma->vm_start;
2463+ addr = skip_heap_stack_gap(vma, len);
2464 }
2465
2466 /*
2467@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2468 if (fixed && addr > (mm->task_size - len))
2469 return -EINVAL;
2470
2471+#ifdef CONFIG_PAX_RANDMMAP
2472+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2473+ addr = 0;
2474+#endif
2475+
2476 /* If hint, make sure it matches our alignment restrictions */
2477 if (!fixed && addr) {
2478 addr = _ALIGN_UP(addr, 1ul << pshift);
2479diff -urNp linux-3.0.4/arch/s390/include/asm/elf.h linux-3.0.4/arch/s390/include/asm/elf.h
2480--- linux-3.0.4/arch/s390/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
2481+++ linux-3.0.4/arch/s390/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
2482@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2483 the loader. We need to make sure that it is out of the way of the program
2484 that it will "exec", and that there is sufficient room for the brk. */
2485
2486-extern unsigned long randomize_et_dyn(unsigned long base);
2487-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2488+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2489+
2490+#ifdef CONFIG_PAX_ASLR
2491+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2492+
2493+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2494+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2495+#endif
2496
2497 /* This yields a mask that user programs can use to figure out what
2498 instruction set this CPU supports. */
2499@@ -210,7 +216,4 @@ struct linux_binprm;
2500 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2501 int arch_setup_additional_pages(struct linux_binprm *, int);
2502
2503-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2504-#define arch_randomize_brk arch_randomize_brk
2505-
2506 #endif
2507diff -urNp linux-3.0.4/arch/s390/include/asm/system.h linux-3.0.4/arch/s390/include/asm/system.h
2508--- linux-3.0.4/arch/s390/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2509+++ linux-3.0.4/arch/s390/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2510@@ -255,7 +255,7 @@ extern void (*_machine_restart)(char *co
2511 extern void (*_machine_halt)(void);
2512 extern void (*_machine_power_off)(void);
2513
2514-extern unsigned long arch_align_stack(unsigned long sp);
2515+#define arch_align_stack(x) ((x) & ~0xfUL)
2516
2517 static inline int tprot(unsigned long addr)
2518 {
2519diff -urNp linux-3.0.4/arch/s390/include/asm/uaccess.h linux-3.0.4/arch/s390/include/asm/uaccess.h
2520--- linux-3.0.4/arch/s390/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
2521+++ linux-3.0.4/arch/s390/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
2522@@ -235,6 +235,10 @@ static inline unsigned long __must_check
2523 copy_to_user(void __user *to, const void *from, unsigned long n)
2524 {
2525 might_fault();
2526+
2527+ if ((long)n < 0)
2528+ return n;
2529+
2530 if (access_ok(VERIFY_WRITE, to, n))
2531 n = __copy_to_user(to, from, n);
2532 return n;
2533@@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void
2534 static inline unsigned long __must_check
2535 __copy_from_user(void *to, const void __user *from, unsigned long n)
2536 {
2537+ if ((long)n < 0)
2538+ return n;
2539+
2540 if (__builtin_constant_p(n) && (n <= 256))
2541 return uaccess.copy_from_user_small(n, from, to);
2542 else
2543@@ -294,6 +301,10 @@ copy_from_user(void *to, const void __us
2544 unsigned int sz = __compiletime_object_size(to);
2545
2546 might_fault();
2547+
2548+ if ((long)n < 0)
2549+ return n;
2550+
2551 if (unlikely(sz != -1 && sz < n)) {
2552 copy_from_user_overflow();
2553 return n;
2554diff -urNp linux-3.0.4/arch/s390/kernel/module.c linux-3.0.4/arch/s390/kernel/module.c
2555--- linux-3.0.4/arch/s390/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2556+++ linux-3.0.4/arch/s390/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2557@@ -168,11 +168,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
2558
2559 /* Increase core size by size of got & plt and set start
2560 offsets for got and plt. */
2561- me->core_size = ALIGN(me->core_size, 4);
2562- me->arch.got_offset = me->core_size;
2563- me->core_size += me->arch.got_size;
2564- me->arch.plt_offset = me->core_size;
2565- me->core_size += me->arch.plt_size;
2566+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
2567+ me->arch.got_offset = me->core_size_rw;
2568+ me->core_size_rw += me->arch.got_size;
2569+ me->arch.plt_offset = me->core_size_rx;
2570+ me->core_size_rx += me->arch.plt_size;
2571 return 0;
2572 }
2573
2574@@ -258,7 +258,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2575 if (info->got_initialized == 0) {
2576 Elf_Addr *gotent;
2577
2578- gotent = me->module_core + me->arch.got_offset +
2579+ gotent = me->module_core_rw + me->arch.got_offset +
2580 info->got_offset;
2581 *gotent = val;
2582 info->got_initialized = 1;
2583@@ -282,7 +282,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2584 else if (r_type == R_390_GOTENT ||
2585 r_type == R_390_GOTPLTENT)
2586 *(unsigned int *) loc =
2587- (val + (Elf_Addr) me->module_core - loc) >> 1;
2588+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
2589 else if (r_type == R_390_GOT64 ||
2590 r_type == R_390_GOTPLT64)
2591 *(unsigned long *) loc = val;
2592@@ -296,7 +296,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2593 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
2594 if (info->plt_initialized == 0) {
2595 unsigned int *ip;
2596- ip = me->module_core + me->arch.plt_offset +
2597+ ip = me->module_core_rx + me->arch.plt_offset +
2598 info->plt_offset;
2599 #ifndef CONFIG_64BIT
2600 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
2601@@ -321,7 +321,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2602 val - loc + 0xffffUL < 0x1ffffeUL) ||
2603 (r_type == R_390_PLT32DBL &&
2604 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
2605- val = (Elf_Addr) me->module_core +
2606+ val = (Elf_Addr) me->module_core_rx +
2607 me->arch.plt_offset +
2608 info->plt_offset;
2609 val += rela->r_addend - loc;
2610@@ -343,7 +343,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2611 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
2612 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
2613 val = val + rela->r_addend -
2614- ((Elf_Addr) me->module_core + me->arch.got_offset);
2615+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
2616 if (r_type == R_390_GOTOFF16)
2617 *(unsigned short *) loc = val;
2618 else if (r_type == R_390_GOTOFF32)
2619@@ -353,7 +353,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2620 break;
2621 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
2622 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
2623- val = (Elf_Addr) me->module_core + me->arch.got_offset +
2624+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
2625 rela->r_addend - loc;
2626 if (r_type == R_390_GOTPC)
2627 *(unsigned int *) loc = val;
2628diff -urNp linux-3.0.4/arch/s390/kernel/process.c linux-3.0.4/arch/s390/kernel/process.c
2629--- linux-3.0.4/arch/s390/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2630+++ linux-3.0.4/arch/s390/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2631@@ -319,39 +319,3 @@ unsigned long get_wchan(struct task_stru
2632 }
2633 return 0;
2634 }
2635-
2636-unsigned long arch_align_stack(unsigned long sp)
2637-{
2638- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2639- sp -= get_random_int() & ~PAGE_MASK;
2640- return sp & ~0xf;
2641-}
2642-
2643-static inline unsigned long brk_rnd(void)
2644-{
2645- /* 8MB for 32bit, 1GB for 64bit */
2646- if (is_32bit_task())
2647- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
2648- else
2649- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
2650-}
2651-
2652-unsigned long arch_randomize_brk(struct mm_struct *mm)
2653-{
2654- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
2655-
2656- if (ret < mm->brk)
2657- return mm->brk;
2658- return ret;
2659-}
2660-
2661-unsigned long randomize_et_dyn(unsigned long base)
2662-{
2663- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2664-
2665- if (!(current->flags & PF_RANDOMIZE))
2666- return base;
2667- if (ret < base)
2668- return base;
2669- return ret;
2670-}
2671diff -urNp linux-3.0.4/arch/s390/kernel/setup.c linux-3.0.4/arch/s390/kernel/setup.c
2672--- linux-3.0.4/arch/s390/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
2673+++ linux-3.0.4/arch/s390/kernel/setup.c 2011-08-23 21:47:55.000000000 -0400
2674@@ -271,7 +271,7 @@ static int __init early_parse_mem(char *
2675 }
2676 early_param("mem", early_parse_mem);
2677
2678-unsigned int user_mode = HOME_SPACE_MODE;
2679+unsigned int user_mode = SECONDARY_SPACE_MODE;
2680 EXPORT_SYMBOL_GPL(user_mode);
2681
2682 static int set_amode_and_uaccess(unsigned long user_amode,
2683diff -urNp linux-3.0.4/arch/s390/mm/mmap.c linux-3.0.4/arch/s390/mm/mmap.c
2684--- linux-3.0.4/arch/s390/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
2685+++ linux-3.0.4/arch/s390/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
2686@@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_str
2687 */
2688 if (mmap_is_legacy()) {
2689 mm->mmap_base = TASK_UNMAPPED_BASE;
2690+
2691+#ifdef CONFIG_PAX_RANDMMAP
2692+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2693+ mm->mmap_base += mm->delta_mmap;
2694+#endif
2695+
2696 mm->get_unmapped_area = arch_get_unmapped_area;
2697 mm->unmap_area = arch_unmap_area;
2698 } else {
2699 mm->mmap_base = mmap_base();
2700+
2701+#ifdef CONFIG_PAX_RANDMMAP
2702+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2703+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2704+#endif
2705+
2706 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2707 mm->unmap_area = arch_unmap_area_topdown;
2708 }
2709@@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_str
2710 */
2711 if (mmap_is_legacy()) {
2712 mm->mmap_base = TASK_UNMAPPED_BASE;
2713+
2714+#ifdef CONFIG_PAX_RANDMMAP
2715+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2716+ mm->mmap_base += mm->delta_mmap;
2717+#endif
2718+
2719 mm->get_unmapped_area = s390_get_unmapped_area;
2720 mm->unmap_area = arch_unmap_area;
2721 } else {
2722 mm->mmap_base = mmap_base();
2723+
2724+#ifdef CONFIG_PAX_RANDMMAP
2725+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2726+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2727+#endif
2728+
2729 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
2730 mm->unmap_area = arch_unmap_area_topdown;
2731 }
2732diff -urNp linux-3.0.4/arch/score/include/asm/system.h linux-3.0.4/arch/score/include/asm/system.h
2733--- linux-3.0.4/arch/score/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2734+++ linux-3.0.4/arch/score/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2735@@ -17,7 +17,7 @@ do { \
2736 #define finish_arch_switch(prev) do {} while (0)
2737
2738 typedef void (*vi_handler_t)(void);
2739-extern unsigned long arch_align_stack(unsigned long sp);
2740+#define arch_align_stack(x) (x)
2741
2742 #define mb() barrier()
2743 #define rmb() barrier()
2744diff -urNp linux-3.0.4/arch/score/kernel/process.c linux-3.0.4/arch/score/kernel/process.c
2745--- linux-3.0.4/arch/score/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2746+++ linux-3.0.4/arch/score/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2747@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
2748
2749 return task_pt_regs(task)->cp0_epc;
2750 }
2751-
2752-unsigned long arch_align_stack(unsigned long sp)
2753-{
2754- return sp;
2755-}
2756diff -urNp linux-3.0.4/arch/sh/mm/mmap.c linux-3.0.4/arch/sh/mm/mmap.c
2757--- linux-3.0.4/arch/sh/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
2758+++ linux-3.0.4/arch/sh/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
2759@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
2760 addr = PAGE_ALIGN(addr);
2761
2762 vma = find_vma(mm, addr);
2763- if (TASK_SIZE - len >= addr &&
2764- (!vma || addr + len <= vma->vm_start))
2765+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2766 return addr;
2767 }
2768
2769@@ -106,7 +105,7 @@ full_search:
2770 }
2771 return -ENOMEM;
2772 }
2773- if (likely(!vma || addr + len <= vma->vm_start)) {
2774+ if (likely(check_heap_stack_gap(vma, addr, len))) {
2775 /*
2776 * Remember the place where we stopped the search:
2777 */
2778@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
2779 addr = PAGE_ALIGN(addr);
2780
2781 vma = find_vma(mm, addr);
2782- if (TASK_SIZE - len >= addr &&
2783- (!vma || addr + len <= vma->vm_start))
2784+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2785 return addr;
2786 }
2787
2788@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
2789 /* make sure it can fit in the remaining address space */
2790 if (likely(addr > len)) {
2791 vma = find_vma(mm, addr-len);
2792- if (!vma || addr <= vma->vm_start) {
2793+ if (check_heap_stack_gap(vma, addr - len, len)) {
2794 /* remember the address as a hint for next time */
2795 return (mm->free_area_cache = addr-len);
2796 }
2797@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
2798 if (unlikely(mm->mmap_base < len))
2799 goto bottomup;
2800
2801- addr = mm->mmap_base-len;
2802- if (do_colour_align)
2803- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2804+ addr = mm->mmap_base - len;
2805
2806 do {
2807+ if (do_colour_align)
2808+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2809 /*
2810 * Lookup failure means no vma is above this address,
2811 * else if new region fits below vma->vm_start,
2812 * return with success:
2813 */
2814 vma = find_vma(mm, addr);
2815- if (likely(!vma || addr+len <= vma->vm_start)) {
2816+ if (likely(check_heap_stack_gap(vma, addr, len))) {
2817 /* remember the address as a hint for next time */
2818 return (mm->free_area_cache = addr);
2819 }
2820@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
2821 mm->cached_hole_size = vma->vm_start - addr;
2822
2823 /* try just below the current vma->vm_start */
2824- addr = vma->vm_start-len;
2825- if (do_colour_align)
2826- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2827- } while (likely(len < vma->vm_start));
2828+ addr = skip_heap_stack_gap(vma, len);
2829+ } while (!IS_ERR_VALUE(addr));
2830
2831 bottomup:
2832 /*
2833diff -urNp linux-3.0.4/arch/sparc/include/asm/atomic_64.h linux-3.0.4/arch/sparc/include/asm/atomic_64.h
2834--- linux-3.0.4/arch/sparc/include/asm/atomic_64.h 2011-07-21 22:17:23.000000000 -0400
2835+++ linux-3.0.4/arch/sparc/include/asm/atomic_64.h 2011-08-23 21:48:14.000000000 -0400
2836@@ -14,18 +14,40 @@
2837 #define ATOMIC64_INIT(i) { (i) }
2838
2839 #define atomic_read(v) (*(volatile int *)&(v)->counter)
2840+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
2841+{
2842+ return v->counter;
2843+}
2844 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
2845+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
2846+{
2847+ return v->counter;
2848+}
2849
2850 #define atomic_set(v, i) (((v)->counter) = i)
2851+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
2852+{
2853+ v->counter = i;
2854+}
2855 #define atomic64_set(v, i) (((v)->counter) = i)
2856+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
2857+{
2858+ v->counter = i;
2859+}
2860
2861 extern void atomic_add(int, atomic_t *);
2862+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
2863 extern void atomic64_add(long, atomic64_t *);
2864+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
2865 extern void atomic_sub(int, atomic_t *);
2866+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
2867 extern void atomic64_sub(long, atomic64_t *);
2868+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
2869
2870 extern int atomic_add_ret(int, atomic_t *);
2871+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
2872 extern long atomic64_add_ret(long, atomic64_t *);
2873+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
2874 extern int atomic_sub_ret(int, atomic_t *);
2875 extern long atomic64_sub_ret(long, atomic64_t *);
2876
2877@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
2878 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
2879
2880 #define atomic_inc_return(v) atomic_add_ret(1, v)
2881+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
2882+{
2883+ return atomic_add_ret_unchecked(1, v);
2884+}
2885 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
2886+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
2887+{
2888+ return atomic64_add_ret_unchecked(1, v);
2889+}
2890
2891 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
2892 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
2893
2894 #define atomic_add_return(i, v) atomic_add_ret(i, v)
2895+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
2896+{
2897+ return atomic_add_ret_unchecked(i, v);
2898+}
2899 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
2900+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
2901+{
2902+ return atomic64_add_ret_unchecked(i, v);
2903+}
2904
2905 /*
2906 * atomic_inc_and_test - increment and test
2907@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
2908 * other cases.
2909 */
2910 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
2911+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
2912+{
2913+ return atomic_inc_return_unchecked(v) == 0;
2914+}
2915 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
2916
2917 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
2918@@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomi
2919 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
2920
2921 #define atomic_inc(v) atomic_add(1, v)
2922+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
2923+{
2924+ atomic_add_unchecked(1, v);
2925+}
2926 #define atomic64_inc(v) atomic64_add(1, v)
2927+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
2928+{
2929+ atomic64_add_unchecked(1, v);
2930+}
2931
2932 #define atomic_dec(v) atomic_sub(1, v)
2933+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
2934+{
2935+ atomic_sub_unchecked(1, v);
2936+}
2937 #define atomic64_dec(v) atomic64_sub(1, v)
2938+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
2939+{
2940+ atomic64_sub_unchecked(1, v);
2941+}
2942
2943 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
2944 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
2945
2946 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2947+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
2948+{
2949+ return cmpxchg(&v->counter, old, new);
2950+}
2951 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
2952+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
2953+{
2954+ return xchg(&v->counter, new);
2955+}
2956
2957 static inline int atomic_add_unless(atomic_t *v, int a, int u)
2958 {
2959- int c, old;
2960+ int c, old, new;
2961 c = atomic_read(v);
2962 for (;;) {
2963- if (unlikely(c == (u)))
2964+ if (unlikely(c == u))
2965 break;
2966- old = atomic_cmpxchg((v), c, c + (a));
2967+
2968+ asm volatile("addcc %2, %0, %0\n"
2969+
2970+#ifdef CONFIG_PAX_REFCOUNT
2971+ "tvs %%icc, 6\n"
2972+#endif
2973+
2974+ : "=r" (new)
2975+ : "0" (c), "ir" (a)
2976+ : "cc");
2977+
2978+ old = atomic_cmpxchg(v, c, new);
2979 if (likely(old == c))
2980 break;
2981 c = old;
2982 }
2983- return c != (u);
2984+ return c != u;
2985 }
2986
2987 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
2988@@ -90,20 +167,35 @@ static inline int atomic_add_unless(atom
2989 #define atomic64_cmpxchg(v, o, n) \
2990 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
2991 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
2992+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
2993+{
2994+ return xchg(&v->counter, new);
2995+}
2996
2997 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
2998 {
2999- long c, old;
3000+ long c, old, new;
3001 c = atomic64_read(v);
3002 for (;;) {
3003- if (unlikely(c == (u)))
3004+ if (unlikely(c == u))
3005 break;
3006- old = atomic64_cmpxchg((v), c, c + (a));
3007+
3008+ asm volatile("addcc %2, %0, %0\n"
3009+
3010+#ifdef CONFIG_PAX_REFCOUNT
3011+ "tvs %%xcc, 6\n"
3012+#endif
3013+
3014+ : "=r" (new)
3015+ : "0" (c), "ir" (a)
3016+ : "cc");
3017+
3018+ old = atomic64_cmpxchg(v, c, new);
3019 if (likely(old == c))
3020 break;
3021 c = old;
3022 }
3023- return c != (u);
3024+ return c != u;
3025 }
3026
3027 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3028diff -urNp linux-3.0.4/arch/sparc/include/asm/cache.h linux-3.0.4/arch/sparc/include/asm/cache.h
3029--- linux-3.0.4/arch/sparc/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
3030+++ linux-3.0.4/arch/sparc/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
3031@@ -10,7 +10,7 @@
3032 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3033
3034 #define L1_CACHE_SHIFT 5
3035-#define L1_CACHE_BYTES 32
3036+#define L1_CACHE_BYTES 32UL
3037
3038 #ifdef CONFIG_SPARC32
3039 #define SMP_CACHE_BYTES_SHIFT 5
3040diff -urNp linux-3.0.4/arch/sparc/include/asm/elf_32.h linux-3.0.4/arch/sparc/include/asm/elf_32.h
3041--- linux-3.0.4/arch/sparc/include/asm/elf_32.h 2011-07-21 22:17:23.000000000 -0400
3042+++ linux-3.0.4/arch/sparc/include/asm/elf_32.h 2011-08-23 21:47:55.000000000 -0400
3043@@ -114,6 +114,13 @@ typedef struct {
3044
3045 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3046
3047+#ifdef CONFIG_PAX_ASLR
3048+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3049+
3050+#define PAX_DELTA_MMAP_LEN 16
3051+#define PAX_DELTA_STACK_LEN 16
3052+#endif
3053+
3054 /* This yields a mask that user programs can use to figure out what
3055 instruction set this cpu supports. This can NOT be done in userspace
3056 on Sparc. */
3057diff -urNp linux-3.0.4/arch/sparc/include/asm/elf_64.h linux-3.0.4/arch/sparc/include/asm/elf_64.h
3058--- linux-3.0.4/arch/sparc/include/asm/elf_64.h 2011-09-02 18:11:21.000000000 -0400
3059+++ linux-3.0.4/arch/sparc/include/asm/elf_64.h 2011-08-23 21:47:55.000000000 -0400
3060@@ -180,6 +180,13 @@ typedef struct {
3061 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3062 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3063
3064+#ifdef CONFIG_PAX_ASLR
3065+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3066+
3067+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3068+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3069+#endif
3070+
3071 extern unsigned long sparc64_elf_hwcap;
3072 #define ELF_HWCAP sparc64_elf_hwcap
3073
3074diff -urNp linux-3.0.4/arch/sparc/include/asm/pgtable_32.h linux-3.0.4/arch/sparc/include/asm/pgtable_32.h
3075--- linux-3.0.4/arch/sparc/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
3076+++ linux-3.0.4/arch/sparc/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
3077@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3078 BTFIXUPDEF_INT(page_none)
3079 BTFIXUPDEF_INT(page_copy)
3080 BTFIXUPDEF_INT(page_readonly)
3081+
3082+#ifdef CONFIG_PAX_PAGEEXEC
3083+BTFIXUPDEF_INT(page_shared_noexec)
3084+BTFIXUPDEF_INT(page_copy_noexec)
3085+BTFIXUPDEF_INT(page_readonly_noexec)
3086+#endif
3087+
3088 BTFIXUPDEF_INT(page_kernel)
3089
3090 #define PMD_SHIFT SUN4C_PMD_SHIFT
3091@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3092 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3093 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3094
3095+#ifdef CONFIG_PAX_PAGEEXEC
3096+extern pgprot_t PAGE_SHARED_NOEXEC;
3097+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3098+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3099+#else
3100+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3101+# define PAGE_COPY_NOEXEC PAGE_COPY
3102+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3103+#endif
3104+
3105 extern unsigned long page_kernel;
3106
3107 #ifdef MODULE
3108diff -urNp linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h
3109--- linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h 2011-07-21 22:17:23.000000000 -0400
3110+++ linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h 2011-08-23 21:47:55.000000000 -0400
3111@@ -115,6 +115,13 @@
3112 SRMMU_EXEC | SRMMU_REF)
3113 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3114 SRMMU_EXEC | SRMMU_REF)
3115+
3116+#ifdef CONFIG_PAX_PAGEEXEC
3117+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3118+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3119+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3120+#endif
3121+
3122 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3123 SRMMU_DIRTY | SRMMU_REF)
3124
3125diff -urNp linux-3.0.4/arch/sparc/include/asm/spinlock_64.h linux-3.0.4/arch/sparc/include/asm/spinlock_64.h
3126--- linux-3.0.4/arch/sparc/include/asm/spinlock_64.h 2011-07-21 22:17:23.000000000 -0400
3127+++ linux-3.0.4/arch/sparc/include/asm/spinlock_64.h 2011-08-23 21:47:55.000000000 -0400
3128@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(
3129
3130 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3131
3132-static void inline arch_read_lock(arch_rwlock_t *lock)
3133+static inline void arch_read_lock(arch_rwlock_t *lock)
3134 {
3135 unsigned long tmp1, tmp2;
3136
3137 __asm__ __volatile__ (
3138 "1: ldsw [%2], %0\n"
3139 " brlz,pn %0, 2f\n"
3140-"4: add %0, 1, %1\n"
3141+"4: addcc %0, 1, %1\n"
3142+
3143+#ifdef CONFIG_PAX_REFCOUNT
3144+" tvs %%icc, 6\n"
3145+#endif
3146+
3147 " cas [%2], %0, %1\n"
3148 " cmp %0, %1\n"
3149 " bne,pn %%icc, 1b\n"
3150@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r
3151 " .previous"
3152 : "=&r" (tmp1), "=&r" (tmp2)
3153 : "r" (lock)
3154- : "memory");
3155+ : "memory", "cc");
3156 }
3157
3158-static int inline arch_read_trylock(arch_rwlock_t *lock)
3159+static inline int arch_read_trylock(arch_rwlock_t *lock)
3160 {
3161 int tmp1, tmp2;
3162
3163@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
3164 "1: ldsw [%2], %0\n"
3165 " brlz,a,pn %0, 2f\n"
3166 " mov 0, %0\n"
3167-" add %0, 1, %1\n"
3168+" addcc %0, 1, %1\n"
3169+
3170+#ifdef CONFIG_PAX_REFCOUNT
3171+" tvs %%icc, 6\n"
3172+#endif
3173+
3174 " cas [%2], %0, %1\n"
3175 " cmp %0, %1\n"
3176 " bne,pn %%icc, 1b\n"
3177@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch
3178 return tmp1;
3179 }
3180
3181-static void inline arch_read_unlock(arch_rwlock_t *lock)
3182+static inline void arch_read_unlock(arch_rwlock_t *lock)
3183 {
3184 unsigned long tmp1, tmp2;
3185
3186 __asm__ __volatile__(
3187 "1: lduw [%2], %0\n"
3188-" sub %0, 1, %1\n"
3189+" subcc %0, 1, %1\n"
3190+
3191+#ifdef CONFIG_PAX_REFCOUNT
3192+" tvs %%icc, 6\n"
3193+#endif
3194+
3195 " cas [%2], %0, %1\n"
3196 " cmp %0, %1\n"
3197 " bne,pn %%xcc, 1b\n"
3198@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch
3199 : "memory");
3200 }
3201
3202-static void inline arch_write_lock(arch_rwlock_t *lock)
3203+static inline void arch_write_lock(arch_rwlock_t *lock)
3204 {
3205 unsigned long mask, tmp1, tmp2;
3206
3207@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_
3208 : "memory");
3209 }
3210
3211-static void inline arch_write_unlock(arch_rwlock_t *lock)
3212+static inline void arch_write_unlock(arch_rwlock_t *lock)
3213 {
3214 __asm__ __volatile__(
3215 " stw %%g0, [%0]"
3216@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc
3217 : "memory");
3218 }
3219
3220-static int inline arch_write_trylock(arch_rwlock_t *lock)
3221+static inline int arch_write_trylock(arch_rwlock_t *lock)
3222 {
3223 unsigned long mask, tmp1, tmp2, result;
3224
3225diff -urNp linux-3.0.4/arch/sparc/include/asm/thread_info_32.h linux-3.0.4/arch/sparc/include/asm/thread_info_32.h
3226--- linux-3.0.4/arch/sparc/include/asm/thread_info_32.h 2011-07-21 22:17:23.000000000 -0400
3227+++ linux-3.0.4/arch/sparc/include/asm/thread_info_32.h 2011-08-23 21:47:55.000000000 -0400
3228@@ -50,6 +50,8 @@ struct thread_info {
3229 unsigned long w_saved;
3230
3231 struct restart_block restart_block;
3232+
3233+ unsigned long lowest_stack;
3234 };
3235
3236 /*
3237diff -urNp linux-3.0.4/arch/sparc/include/asm/thread_info_64.h linux-3.0.4/arch/sparc/include/asm/thread_info_64.h
3238--- linux-3.0.4/arch/sparc/include/asm/thread_info_64.h 2011-07-21 22:17:23.000000000 -0400
3239+++ linux-3.0.4/arch/sparc/include/asm/thread_info_64.h 2011-08-23 21:47:55.000000000 -0400
3240@@ -63,6 +63,8 @@ struct thread_info {
3241 struct pt_regs *kern_una_regs;
3242 unsigned int kern_una_insn;
3243
3244+ unsigned long lowest_stack;
3245+
3246 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3247 };
3248
3249diff -urNp linux-3.0.4/arch/sparc/include/asm/uaccess_32.h linux-3.0.4/arch/sparc/include/asm/uaccess_32.h
3250--- linux-3.0.4/arch/sparc/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
3251+++ linux-3.0.4/arch/sparc/include/asm/uaccess_32.h 2011-08-23 21:47:55.000000000 -0400
3252@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3253
3254 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3255 {
3256- if (n && __access_ok((unsigned long) to, n))
3257+ if ((long)n < 0)
3258+ return n;
3259+
3260+ if (n && __access_ok((unsigned long) to, n)) {
3261+ if (!__builtin_constant_p(n))
3262+ check_object_size(from, n, true);
3263 return __copy_user(to, (__force void __user *) from, n);
3264- else
3265+ } else
3266 return n;
3267 }
3268
3269 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3270 {
3271+ if ((long)n < 0)
3272+ return n;
3273+
3274+ if (!__builtin_constant_p(n))
3275+ check_object_size(from, n, true);
3276+
3277 return __copy_user(to, (__force void __user *) from, n);
3278 }
3279
3280 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3281 {
3282- if (n && __access_ok((unsigned long) from, n))
3283+ if ((long)n < 0)
3284+ return n;
3285+
3286+ if (n && __access_ok((unsigned long) from, n)) {
3287+ if (!__builtin_constant_p(n))
3288+ check_object_size(to, n, false);
3289 return __copy_user((__force void __user *) to, from, n);
3290- else
3291+ } else
3292 return n;
3293 }
3294
3295 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3296 {
3297+ if ((long)n < 0)
3298+ return n;
3299+
3300 return __copy_user((__force void __user *) to, from, n);
3301 }
3302
3303diff -urNp linux-3.0.4/arch/sparc/include/asm/uaccess_64.h linux-3.0.4/arch/sparc/include/asm/uaccess_64.h
3304--- linux-3.0.4/arch/sparc/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
3305+++ linux-3.0.4/arch/sparc/include/asm/uaccess_64.h 2011-08-23 21:47:55.000000000 -0400
3306@@ -10,6 +10,7 @@
3307 #include <linux/compiler.h>
3308 #include <linux/string.h>
3309 #include <linux/thread_info.h>
3310+#include <linux/kernel.h>
3311 #include <asm/asi.h>
3312 #include <asm/system.h>
3313 #include <asm/spitfire.h>
3314@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixu
3315 static inline unsigned long __must_check
3316 copy_from_user(void *to, const void __user *from, unsigned long size)
3317 {
3318- unsigned long ret = ___copy_from_user(to, from, size);
3319+ unsigned long ret;
3320
3321+ if ((long)size < 0 || size > INT_MAX)
3322+ return size;
3323+
3324+ if (!__builtin_constant_p(size))
3325+ check_object_size(to, size, false);
3326+
3327+ ret = ___copy_from_user(to, from, size);
3328 if (unlikely(ret))
3329 ret = copy_from_user_fixup(to, from, size);
3330
3331@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(
3332 static inline unsigned long __must_check
3333 copy_to_user(void __user *to, const void *from, unsigned long size)
3334 {
3335- unsigned long ret = ___copy_to_user(to, from, size);
3336+ unsigned long ret;
3337+
3338+ if ((long)size < 0 || size > INT_MAX)
3339+ return size;
3340+
3341+ if (!__builtin_constant_p(size))
3342+ check_object_size(from, size, true);
3343
3344+ ret = ___copy_to_user(to, from, size);
3345 if (unlikely(ret))
3346 ret = copy_to_user_fixup(to, from, size);
3347 return ret;
3348diff -urNp linux-3.0.4/arch/sparc/include/asm/uaccess.h linux-3.0.4/arch/sparc/include/asm/uaccess.h
3349--- linux-3.0.4/arch/sparc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
3350+++ linux-3.0.4/arch/sparc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
3351@@ -1,5 +1,13 @@
3352 #ifndef ___ASM_SPARC_UACCESS_H
3353 #define ___ASM_SPARC_UACCESS_H
3354+
3355+#ifdef __KERNEL__
3356+#ifndef __ASSEMBLY__
3357+#include <linux/types.h>
3358+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3359+#endif
3360+#endif
3361+
3362 #if defined(__sparc__) && defined(__arch64__)
3363 #include <asm/uaccess_64.h>
3364 #else
3365diff -urNp linux-3.0.4/arch/sparc/kernel/Makefile linux-3.0.4/arch/sparc/kernel/Makefile
3366--- linux-3.0.4/arch/sparc/kernel/Makefile 2011-07-21 22:17:23.000000000 -0400
3367+++ linux-3.0.4/arch/sparc/kernel/Makefile 2011-08-23 21:47:55.000000000 -0400
3368@@ -3,7 +3,7 @@
3369 #
3370
3371 asflags-y := -ansi
3372-ccflags-y := -Werror
3373+#ccflags-y := -Werror
3374
3375 extra-y := head_$(BITS).o
3376 extra-y += init_task.o
3377diff -urNp linux-3.0.4/arch/sparc/kernel/process_32.c linux-3.0.4/arch/sparc/kernel/process_32.c
3378--- linux-3.0.4/arch/sparc/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
3379+++ linux-3.0.4/arch/sparc/kernel/process_32.c 2011-08-23 21:48:14.000000000 -0400
3380@@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3381 rw->ins[4], rw->ins[5],
3382 rw->ins[6],
3383 rw->ins[7]);
3384- printk("%pS\n", (void *) rw->ins[7]);
3385+ printk("%pA\n", (void *) rw->ins[7]);
3386 rw = (struct reg_window32 *) rw->ins[6];
3387 }
3388 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3389@@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3390
3391 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3392 r->psr, r->pc, r->npc, r->y, print_tainted());
3393- printk("PC: <%pS>\n", (void *) r->pc);
3394+ printk("PC: <%pA>\n", (void *) r->pc);
3395 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3396 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3397 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3398 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3399 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3400 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3401- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3402+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3403
3404 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3405 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3406@@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk,
3407 rw = (struct reg_window32 *) fp;
3408 pc = rw->ins[7];
3409 printk("[%08lx : ", pc);
3410- printk("%pS ] ", (void *) pc);
3411+ printk("%pA ] ", (void *) pc);
3412 fp = rw->ins[6];
3413 } while (++count < 16);
3414 printk("\n");
3415diff -urNp linux-3.0.4/arch/sparc/kernel/process_64.c linux-3.0.4/arch/sparc/kernel/process_64.c
3416--- linux-3.0.4/arch/sparc/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
3417+++ linux-3.0.4/arch/sparc/kernel/process_64.c 2011-08-23 21:48:14.000000000 -0400
3418@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
3419 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3420 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3421 if (regs->tstate & TSTATE_PRIV)
3422- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3423+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3424 }
3425
3426 void show_regs(struct pt_regs *regs)
3427 {
3428 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3429 regs->tpc, regs->tnpc, regs->y, print_tainted());
3430- printk("TPC: <%pS>\n", (void *) regs->tpc);
3431+ printk("TPC: <%pA>\n", (void *) regs->tpc);
3432 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3433 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3434 regs->u_regs[3]);
3435@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3436 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3437 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3438 regs->u_regs[15]);
3439- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3440+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3441 show_regwindow(regs);
3442 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3443 }
3444@@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void
3445 ((tp && tp->task) ? tp->task->pid : -1));
3446
3447 if (gp->tstate & TSTATE_PRIV) {
3448- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3449+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3450 (void *) gp->tpc,
3451 (void *) gp->o7,
3452 (void *) gp->i7,
3453diff -urNp linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c
3454--- linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c 2011-07-21 22:17:23.000000000 -0400
3455+++ linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c 2011-08-23 21:47:55.000000000 -0400
3456@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
3457 if (ARCH_SUN4C && len > 0x20000000)
3458 return -ENOMEM;
3459 if (!addr)
3460- addr = TASK_UNMAPPED_BASE;
3461+ addr = current->mm->mmap_base;
3462
3463 if (flags & MAP_SHARED)
3464 addr = COLOUR_ALIGN(addr);
3465@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str
3466 }
3467 if (TASK_SIZE - PAGE_SIZE - len < addr)
3468 return -ENOMEM;
3469- if (!vmm || addr + len <= vmm->vm_start)
3470+ if (check_heap_stack_gap(vmm, addr, len))
3471 return addr;
3472 addr = vmm->vm_end;
3473 if (flags & MAP_SHARED)
3474diff -urNp linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c
3475--- linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c 2011-07-21 22:17:23.000000000 -0400
3476+++ linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c 2011-08-23 21:47:55.000000000 -0400
3477@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
3478 /* We do not accept a shared mapping if it would violate
3479 * cache aliasing constraints.
3480 */
3481- if ((flags & MAP_SHARED) &&
3482+ if ((filp || (flags & MAP_SHARED)) &&
3483 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3484 return -EINVAL;
3485 return addr;
3486@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
3487 if (filp || (flags & MAP_SHARED))
3488 do_color_align = 1;
3489
3490+#ifdef CONFIG_PAX_RANDMMAP
3491+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3492+#endif
3493+
3494 if (addr) {
3495 if (do_color_align)
3496 addr = COLOUR_ALIGN(addr, pgoff);
3497@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
3498 addr = PAGE_ALIGN(addr);
3499
3500 vma = find_vma(mm, addr);
3501- if (task_size - len >= addr &&
3502- (!vma || addr + len <= vma->vm_start))
3503+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3504 return addr;
3505 }
3506
3507 if (len > mm->cached_hole_size) {
3508- start_addr = addr = mm->free_area_cache;
3509+ start_addr = addr = mm->free_area_cache;
3510 } else {
3511- start_addr = addr = TASK_UNMAPPED_BASE;
3512+ start_addr = addr = mm->mmap_base;
3513 mm->cached_hole_size = 0;
3514 }
3515
3516@@ -174,14 +177,14 @@ full_search:
3517 vma = find_vma(mm, VA_EXCLUDE_END);
3518 }
3519 if (unlikely(task_size < addr)) {
3520- if (start_addr != TASK_UNMAPPED_BASE) {
3521- start_addr = addr = TASK_UNMAPPED_BASE;
3522+ if (start_addr != mm->mmap_base) {
3523+ start_addr = addr = mm->mmap_base;
3524 mm->cached_hole_size = 0;
3525 goto full_search;
3526 }
3527 return -ENOMEM;
3528 }
3529- if (likely(!vma || addr + len <= vma->vm_start)) {
3530+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3531 /*
3532 * Remember the place where we stopped the search:
3533 */
3534@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
3535 /* We do not accept a shared mapping if it would violate
3536 * cache aliasing constraints.
3537 */
3538- if ((flags & MAP_SHARED) &&
3539+ if ((filp || (flags & MAP_SHARED)) &&
3540 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3541 return -EINVAL;
3542 return addr;
3543@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
3544 addr = PAGE_ALIGN(addr);
3545
3546 vma = find_vma(mm, addr);
3547- if (task_size - len >= addr &&
3548- (!vma || addr + len <= vma->vm_start))
3549+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3550 return addr;
3551 }
3552
3553@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
3554 /* make sure it can fit in the remaining address space */
3555 if (likely(addr > len)) {
3556 vma = find_vma(mm, addr-len);
3557- if (!vma || addr <= vma->vm_start) {
3558+ if (check_heap_stack_gap(vma, addr - len, len)) {
3559 /* remember the address as a hint for next time */
3560 return (mm->free_area_cache = addr-len);
3561 }
3562@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct fi
3563 if (unlikely(mm->mmap_base < len))
3564 goto bottomup;
3565
3566- addr = mm->mmap_base-len;
3567- if (do_color_align)
3568- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3569+ addr = mm->mmap_base - len;
3570
3571 do {
3572+ if (do_color_align)
3573+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3574 /*
3575 * Lookup failure means no vma is above this address,
3576 * else if new region fits below vma->vm_start,
3577 * return with success:
3578 */
3579 vma = find_vma(mm, addr);
3580- if (likely(!vma || addr+len <= vma->vm_start)) {
3581+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3582 /* remember the address as a hint for next time */
3583 return (mm->free_area_cache = addr);
3584 }
3585@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct fi
3586 mm->cached_hole_size = vma->vm_start - addr;
3587
3588 /* try just below the current vma->vm_start */
3589- addr = vma->vm_start-len;
3590- if (do_color_align)
3591- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3592- } while (likely(len < vma->vm_start));
3593+ addr = skip_heap_stack_gap(vma, len);
3594+ } while (!IS_ERR_VALUE(addr));
3595
3596 bottomup:
3597 /*
3598@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_str
3599 gap == RLIM_INFINITY ||
3600 sysctl_legacy_va_layout) {
3601 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3602+
3603+#ifdef CONFIG_PAX_RANDMMAP
3604+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3605+ mm->mmap_base += mm->delta_mmap;
3606+#endif
3607+
3608 mm->get_unmapped_area = arch_get_unmapped_area;
3609 mm->unmap_area = arch_unmap_area;
3610 } else {
3611@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
3612 gap = (task_size / 6 * 5);
3613
3614 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
3615+
3616+#ifdef CONFIG_PAX_RANDMMAP
3617+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3618+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3619+#endif
3620+
3621 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3622 mm->unmap_area = arch_unmap_area_topdown;
3623 }
3624diff -urNp linux-3.0.4/arch/sparc/kernel/traps_32.c linux-3.0.4/arch/sparc/kernel/traps_32.c
3625--- linux-3.0.4/arch/sparc/kernel/traps_32.c 2011-07-21 22:17:23.000000000 -0400
3626+++ linux-3.0.4/arch/sparc/kernel/traps_32.c 2011-08-23 21:48:14.000000000 -0400
3627@@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
3628 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
3629 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
3630
3631+extern void gr_handle_kernel_exploit(void);
3632+
3633 void die_if_kernel(char *str, struct pt_regs *regs)
3634 {
3635 static int die_counter;
3636@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
3637 count++ < 30 &&
3638 (((unsigned long) rw) >= PAGE_OFFSET) &&
3639 !(((unsigned long) rw) & 0x7)) {
3640- printk("Caller[%08lx]: %pS\n", rw->ins[7],
3641+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
3642 (void *) rw->ins[7]);
3643 rw = (struct reg_window32 *)rw->ins[6];
3644 }
3645 }
3646 printk("Instruction DUMP:");
3647 instruction_dump ((unsigned long *) regs->pc);
3648- if(regs->psr & PSR_PS)
3649+ if(regs->psr & PSR_PS) {
3650+ gr_handle_kernel_exploit();
3651 do_exit(SIGKILL);
3652+ }
3653 do_exit(SIGSEGV);
3654 }
3655
3656diff -urNp linux-3.0.4/arch/sparc/kernel/traps_64.c linux-3.0.4/arch/sparc/kernel/traps_64.c
3657--- linux-3.0.4/arch/sparc/kernel/traps_64.c 2011-07-21 22:17:23.000000000 -0400
3658+++ linux-3.0.4/arch/sparc/kernel/traps_64.c 2011-08-23 21:48:14.000000000 -0400
3659@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_
3660 i + 1,
3661 p->trapstack[i].tstate, p->trapstack[i].tpc,
3662 p->trapstack[i].tnpc, p->trapstack[i].tt);
3663- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
3664+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
3665 }
3666 }
3667
3668@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
3669
3670 lvl -= 0x100;
3671 if (regs->tstate & TSTATE_PRIV) {
3672+
3673+#ifdef CONFIG_PAX_REFCOUNT
3674+ if (lvl == 6)
3675+ pax_report_refcount_overflow(regs);
3676+#endif
3677+
3678 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
3679 die_if_kernel(buffer, regs);
3680 }
3681@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
3682 void bad_trap_tl1(struct pt_regs *regs, long lvl)
3683 {
3684 char buffer[32];
3685-
3686+
3687 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
3688 0, lvl, SIGTRAP) == NOTIFY_STOP)
3689 return;
3690
3691+#ifdef CONFIG_PAX_REFCOUNT
3692+ if (lvl == 6)
3693+ pax_report_refcount_overflow(regs);
3694+#endif
3695+
3696 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
3697
3698 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
3699@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt
3700 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
3701 printk("%s" "ERROR(%d): ",
3702 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
3703- printk("TPC<%pS>\n", (void *) regs->tpc);
3704+ printk("TPC<%pA>\n", (void *) regs->tpc);
3705 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
3706 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
3707 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
3708@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type,
3709 smp_processor_id(),
3710 (type & 0x1) ? 'I' : 'D',
3711 regs->tpc);
3712- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
3713+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
3714 panic("Irrecoverable Cheetah+ parity error.");
3715 }
3716
3717@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type,
3718 smp_processor_id(),
3719 (type & 0x1) ? 'I' : 'D',
3720 regs->tpc);
3721- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
3722+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
3723 }
3724
3725 struct sun4v_error_entry {
3726@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_r
3727
3728 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
3729 regs->tpc, tl);
3730- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
3731+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
3732 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3733- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
3734+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
3735 (void *) regs->u_regs[UREG_I7]);
3736 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
3737 "pte[%lx] error[%lx]\n",
3738@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_r
3739
3740 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
3741 regs->tpc, tl);
3742- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
3743+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
3744 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3745- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
3746+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
3747 (void *) regs->u_regs[UREG_I7]);
3748 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
3749 "pte[%lx] error[%lx]\n",
3750@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk,
3751 fp = (unsigned long)sf->fp + STACK_BIAS;
3752 }
3753
3754- printk(" [%016lx] %pS\n", pc, (void *) pc);
3755+ printk(" [%016lx] %pA\n", pc, (void *) pc);
3756 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3757 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
3758 int index = tsk->curr_ret_stack;
3759 if (tsk->ret_stack && index >= graph) {
3760 pc = tsk->ret_stack[index - graph].ret;
3761- printk(" [%016lx] %pS\n", pc, (void *) pc);
3762+ printk(" [%016lx] %pA\n", pc, (void *) pc);
3763 graph++;
3764 }
3765 }
3766@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_
3767 return (struct reg_window *) (fp + STACK_BIAS);
3768 }
3769
3770+extern void gr_handle_kernel_exploit(void);
3771+
3772 void die_if_kernel(char *str, struct pt_regs *regs)
3773 {
3774 static int die_counter;
3775@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_
3776 while (rw &&
3777 count++ < 30 &&
3778 kstack_valid(tp, (unsigned long) rw)) {
3779- printk("Caller[%016lx]: %pS\n", rw->ins[7],
3780+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
3781 (void *) rw->ins[7]);
3782
3783 rw = kernel_stack_up(rw);
3784@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_
3785 }
3786 user_instruction_dump ((unsigned int __user *) regs->tpc);
3787 }
3788- if (regs->tstate & TSTATE_PRIV)
3789+ if (regs->tstate & TSTATE_PRIV) {
3790+ gr_handle_kernel_exploit();
3791 do_exit(SIGKILL);
3792+ }
3793 do_exit(SIGSEGV);
3794 }
3795 EXPORT_SYMBOL(die_if_kernel);
3796diff -urNp linux-3.0.4/arch/sparc/kernel/unaligned_64.c linux-3.0.4/arch/sparc/kernel/unaligned_64.c
3797--- linux-3.0.4/arch/sparc/kernel/unaligned_64.c 2011-09-02 18:11:21.000000000 -0400
3798+++ linux-3.0.4/arch/sparc/kernel/unaligned_64.c 2011-08-23 21:48:14.000000000 -0400
3799@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs
3800 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
3801
3802 if (__ratelimit(&ratelimit)) {
3803- printk("Kernel unaligned access at TPC[%lx] %pS\n",
3804+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
3805 regs->tpc, (void *) regs->tpc);
3806 }
3807 }
3808diff -urNp linux-3.0.4/arch/sparc/lib/atomic_64.S linux-3.0.4/arch/sparc/lib/atomic_64.S
3809--- linux-3.0.4/arch/sparc/lib/atomic_64.S 2011-07-21 22:17:23.000000000 -0400
3810+++ linux-3.0.4/arch/sparc/lib/atomic_64.S 2011-08-23 21:47:55.000000000 -0400
3811@@ -18,7 +18,12 @@
3812 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
3813 BACKOFF_SETUP(%o2)
3814 1: lduw [%o1], %g1
3815- add %g1, %o0, %g7
3816+ addcc %g1, %o0, %g7
3817+
3818+#ifdef CONFIG_PAX_REFCOUNT
3819+ tvs %icc, 6
3820+#endif
3821+
3822 cas [%o1], %g1, %g7
3823 cmp %g1, %g7
3824 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3825@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
3826 2: BACKOFF_SPIN(%o2, %o3, 1b)
3827 .size atomic_add, .-atomic_add
3828
3829+ .globl atomic_add_unchecked
3830+ .type atomic_add_unchecked,#function
3831+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3832+ BACKOFF_SETUP(%o2)
3833+1: lduw [%o1], %g1
3834+ add %g1, %o0, %g7
3835+ cas [%o1], %g1, %g7
3836+ cmp %g1, %g7
3837+ bne,pn %icc, 2f
3838+ nop
3839+ retl
3840+ nop
3841+2: BACKOFF_SPIN(%o2, %o3, 1b)
3842+ .size atomic_add_unchecked, .-atomic_add_unchecked
3843+
3844 .globl atomic_sub
3845 .type atomic_sub,#function
3846 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3847 BACKOFF_SETUP(%o2)
3848 1: lduw [%o1], %g1
3849- sub %g1, %o0, %g7
3850+ subcc %g1, %o0, %g7
3851+
3852+#ifdef CONFIG_PAX_REFCOUNT
3853+ tvs %icc, 6
3854+#endif
3855+
3856 cas [%o1], %g1, %g7
3857 cmp %g1, %g7
3858 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3859@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
3860 2: BACKOFF_SPIN(%o2, %o3, 1b)
3861 .size atomic_sub, .-atomic_sub
3862
3863+ .globl atomic_sub_unchecked
3864+ .type atomic_sub_unchecked,#function
3865+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3866+ BACKOFF_SETUP(%o2)
3867+1: lduw [%o1], %g1
3868+ sub %g1, %o0, %g7
3869+ cas [%o1], %g1, %g7
3870+ cmp %g1, %g7
3871+ bne,pn %icc, 2f
3872+ nop
3873+ retl
3874+ nop
3875+2: BACKOFF_SPIN(%o2, %o3, 1b)
3876+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
3877+
3878 .globl atomic_add_ret
3879 .type atomic_add_ret,#function
3880 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3881 BACKOFF_SETUP(%o2)
3882 1: lduw [%o1], %g1
3883- add %g1, %o0, %g7
3884+ addcc %g1, %o0, %g7
3885+
3886+#ifdef CONFIG_PAX_REFCOUNT
3887+ tvs %icc, 6
3888+#endif
3889+
3890 cas [%o1], %g1, %g7
3891 cmp %g1, %g7
3892 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3893@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1
3894 2: BACKOFF_SPIN(%o2, %o3, 1b)
3895 .size atomic_add_ret, .-atomic_add_ret
3896
3897+ .globl atomic_add_ret_unchecked
3898+ .type atomic_add_ret_unchecked,#function
3899+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3900+ BACKOFF_SETUP(%o2)
3901+1: lduw [%o1], %g1
3902+ addcc %g1, %o0, %g7
3903+ cas [%o1], %g1, %g7
3904+ cmp %g1, %g7
3905+ bne,pn %icc, 2f
3906+ add %g7, %o0, %g7
3907+ sra %g7, 0, %o0
3908+ retl
3909+ nop
3910+2: BACKOFF_SPIN(%o2, %o3, 1b)
3911+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
3912+
3913 .globl atomic_sub_ret
3914 .type atomic_sub_ret,#function
3915 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
3916 BACKOFF_SETUP(%o2)
3917 1: lduw [%o1], %g1
3918- sub %g1, %o0, %g7
3919+ subcc %g1, %o0, %g7
3920+
3921+#ifdef CONFIG_PAX_REFCOUNT
3922+ tvs %icc, 6
3923+#endif
3924+
3925 cas [%o1], %g1, %g7
3926 cmp %g1, %g7
3927 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3928@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
3929 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
3930 BACKOFF_SETUP(%o2)
3931 1: ldx [%o1], %g1
3932- add %g1, %o0, %g7
3933+ addcc %g1, %o0, %g7
3934+
3935+#ifdef CONFIG_PAX_REFCOUNT
3936+ tvs %xcc, 6
3937+#endif
3938+
3939 casx [%o1], %g1, %g7
3940 cmp %g1, %g7
3941 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3942@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 =
3943 2: BACKOFF_SPIN(%o2, %o3, 1b)
3944 .size atomic64_add, .-atomic64_add
3945
3946+ .globl atomic64_add_unchecked
3947+ .type atomic64_add_unchecked,#function
3948+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3949+ BACKOFF_SETUP(%o2)
3950+1: ldx [%o1], %g1
3951+ addcc %g1, %o0, %g7
3952+ casx [%o1], %g1, %g7
3953+ cmp %g1, %g7
3954+ bne,pn %xcc, 2f
3955+ nop
3956+ retl
3957+ nop
3958+2: BACKOFF_SPIN(%o2, %o3, 1b)
3959+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
3960+
3961 .globl atomic64_sub
3962 .type atomic64_sub,#function
3963 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3964 BACKOFF_SETUP(%o2)
3965 1: ldx [%o1], %g1
3966- sub %g1, %o0, %g7
3967+ subcc %g1, %o0, %g7
3968+
3969+#ifdef CONFIG_PAX_REFCOUNT
3970+ tvs %xcc, 6
3971+#endif
3972+
3973 casx [%o1], %g1, %g7
3974 cmp %g1, %g7
3975 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3976@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
3977 2: BACKOFF_SPIN(%o2, %o3, 1b)
3978 .size atomic64_sub, .-atomic64_sub
3979
3980+ .globl atomic64_sub_unchecked
3981+ .type atomic64_sub_unchecked,#function
3982+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3983+ BACKOFF_SETUP(%o2)
3984+1: ldx [%o1], %g1
3985+ subcc %g1, %o0, %g7
3986+ casx [%o1], %g1, %g7
3987+ cmp %g1, %g7
3988+ bne,pn %xcc, 2f
3989+ nop
3990+ retl
3991+ nop
3992+2: BACKOFF_SPIN(%o2, %o3, 1b)
3993+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
3994+
3995 .globl atomic64_add_ret
3996 .type atomic64_add_ret,#function
3997 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3998 BACKOFF_SETUP(%o2)
3999 1: ldx [%o1], %g1
4000- add %g1, %o0, %g7
4001+ addcc %g1, %o0, %g7
4002+
4003+#ifdef CONFIG_PAX_REFCOUNT
4004+ tvs %xcc, 6
4005+#endif
4006+
4007 casx [%o1], %g1, %g7
4008 cmp %g1, %g7
4009 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4010@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4011 2: BACKOFF_SPIN(%o2, %o3, 1b)
4012 .size atomic64_add_ret, .-atomic64_add_ret
4013
4014+ .globl atomic64_add_ret_unchecked
4015+ .type atomic64_add_ret_unchecked,#function
4016+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4017+ BACKOFF_SETUP(%o2)
4018+1: ldx [%o1], %g1
4019+ addcc %g1, %o0, %g7
4020+ casx [%o1], %g1, %g7
4021+ cmp %g1, %g7
4022+ bne,pn %xcc, 2f
4023+ add %g7, %o0, %g7
4024+ mov %g7, %o0
4025+ retl
4026+ nop
4027+2: BACKOFF_SPIN(%o2, %o3, 1b)
4028+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4029+
4030 .globl atomic64_sub_ret
4031 .type atomic64_sub_ret,#function
4032 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4033 BACKOFF_SETUP(%o2)
4034 1: ldx [%o1], %g1
4035- sub %g1, %o0, %g7
4036+ subcc %g1, %o0, %g7
4037+
4038+#ifdef CONFIG_PAX_REFCOUNT
4039+ tvs %xcc, 6
4040+#endif
4041+
4042 casx [%o1], %g1, %g7
4043 cmp %g1, %g7
4044 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4045diff -urNp linux-3.0.4/arch/sparc/lib/ksyms.c linux-3.0.4/arch/sparc/lib/ksyms.c
4046--- linux-3.0.4/arch/sparc/lib/ksyms.c 2011-07-21 22:17:23.000000000 -0400
4047+++ linux-3.0.4/arch/sparc/lib/ksyms.c 2011-08-23 21:48:14.000000000 -0400
4048@@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4049
4050 /* Atomic counter implementation. */
4051 EXPORT_SYMBOL(atomic_add);
4052+EXPORT_SYMBOL(atomic_add_unchecked);
4053 EXPORT_SYMBOL(atomic_add_ret);
4054+EXPORT_SYMBOL(atomic_add_ret_unchecked);
4055 EXPORT_SYMBOL(atomic_sub);
4056+EXPORT_SYMBOL(atomic_sub_unchecked);
4057 EXPORT_SYMBOL(atomic_sub_ret);
4058 EXPORT_SYMBOL(atomic64_add);
4059+EXPORT_SYMBOL(atomic64_add_unchecked);
4060 EXPORT_SYMBOL(atomic64_add_ret);
4061+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4062 EXPORT_SYMBOL(atomic64_sub);
4063+EXPORT_SYMBOL(atomic64_sub_unchecked);
4064 EXPORT_SYMBOL(atomic64_sub_ret);
4065
4066 /* Atomic bit operations. */
4067diff -urNp linux-3.0.4/arch/sparc/lib/Makefile linux-3.0.4/arch/sparc/lib/Makefile
4068--- linux-3.0.4/arch/sparc/lib/Makefile 2011-09-02 18:11:21.000000000 -0400
4069+++ linux-3.0.4/arch/sparc/lib/Makefile 2011-08-23 21:47:55.000000000 -0400
4070@@ -2,7 +2,7 @@
4071 #
4072
4073 asflags-y := -ansi -DST_DIV0=0x02
4074-ccflags-y := -Werror
4075+#ccflags-y := -Werror
4076
4077 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4078 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4079diff -urNp linux-3.0.4/arch/sparc/Makefile linux-3.0.4/arch/sparc/Makefile
4080--- linux-3.0.4/arch/sparc/Makefile 2011-07-21 22:17:23.000000000 -0400
4081+++ linux-3.0.4/arch/sparc/Makefile 2011-08-23 21:48:14.000000000 -0400
4082@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4083 # Export what is needed by arch/sparc/boot/Makefile
4084 export VMLINUX_INIT VMLINUX_MAIN
4085 VMLINUX_INIT := $(head-y) $(init-y)
4086-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4087+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4088 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4089 VMLINUX_MAIN += $(drivers-y) $(net-y)
4090
4091diff -urNp linux-3.0.4/arch/sparc/mm/fault_32.c linux-3.0.4/arch/sparc/mm/fault_32.c
4092--- linux-3.0.4/arch/sparc/mm/fault_32.c 2011-07-21 22:17:23.000000000 -0400
4093+++ linux-3.0.4/arch/sparc/mm/fault_32.c 2011-08-23 21:47:55.000000000 -0400
4094@@ -22,6 +22,9 @@
4095 #include <linux/interrupt.h>
4096 #include <linux/module.h>
4097 #include <linux/kdebug.h>
4098+#include <linux/slab.h>
4099+#include <linux/pagemap.h>
4100+#include <linux/compiler.h>
4101
4102 #include <asm/system.h>
4103 #include <asm/page.h>
4104@@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
4105 return safe_compute_effective_address(regs, insn);
4106 }
4107
4108+#ifdef CONFIG_PAX_PAGEEXEC
4109+#ifdef CONFIG_PAX_DLRESOLVE
4110+static void pax_emuplt_close(struct vm_area_struct *vma)
4111+{
4112+ vma->vm_mm->call_dl_resolve = 0UL;
4113+}
4114+
4115+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4116+{
4117+ unsigned int *kaddr;
4118+
4119+ vmf->page = alloc_page(GFP_HIGHUSER);
4120+ if (!vmf->page)
4121+ return VM_FAULT_OOM;
4122+
4123+ kaddr = kmap(vmf->page);
4124+ memset(kaddr, 0, PAGE_SIZE);
4125+ kaddr[0] = 0x9DE3BFA8U; /* save */
4126+ flush_dcache_page(vmf->page);
4127+ kunmap(vmf->page);
4128+ return VM_FAULT_MAJOR;
4129+}
4130+
4131+static const struct vm_operations_struct pax_vm_ops = {
4132+ .close = pax_emuplt_close,
4133+ .fault = pax_emuplt_fault
4134+};
4135+
4136+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4137+{
4138+ int ret;
4139+
4140+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4141+ vma->vm_mm = current->mm;
4142+ vma->vm_start = addr;
4143+ vma->vm_end = addr + PAGE_SIZE;
4144+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4145+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4146+ vma->vm_ops = &pax_vm_ops;
4147+
4148+ ret = insert_vm_struct(current->mm, vma);
4149+ if (ret)
4150+ return ret;
4151+
4152+ ++current->mm->total_vm;
4153+ return 0;
4154+}
4155+#endif
4156+
4157+/*
4158+ * PaX: decide what to do with offenders (regs->pc = fault address)
4159+ *
4160+ * returns 1 when task should be killed
4161+ * 2 when patched PLT trampoline was detected
4162+ * 3 when unpatched PLT trampoline was detected
4163+ */
4164+static int pax_handle_fetch_fault(struct pt_regs *regs)
4165+{
4166+
4167+#ifdef CONFIG_PAX_EMUPLT
4168+ int err;
4169+
4170+ do { /* PaX: patched PLT emulation #1 */
4171+ unsigned int sethi1, sethi2, jmpl;
4172+
4173+ err = get_user(sethi1, (unsigned int *)regs->pc);
4174+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4175+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4176+
4177+ if (err)
4178+ break;
4179+
4180+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4181+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4182+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4183+ {
4184+ unsigned int addr;
4185+
4186+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4187+ addr = regs->u_regs[UREG_G1];
4188+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4189+ regs->pc = addr;
4190+ regs->npc = addr+4;
4191+ return 2;
4192+ }
4193+ } while (0);
4194+
4195+ { /* PaX: patched PLT emulation #2 */
4196+ unsigned int ba;
4197+
4198+ err = get_user(ba, (unsigned int *)regs->pc);
4199+
4200+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4201+ unsigned int addr;
4202+
4203+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4204+ regs->pc = addr;
4205+ regs->npc = addr+4;
4206+ return 2;
4207+ }
4208+ }
4209+
4210+ do { /* PaX: patched PLT emulation #3 */
4211+ unsigned int sethi, jmpl, nop;
4212+
4213+ err = get_user(sethi, (unsigned int *)regs->pc);
4214+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4215+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4216+
4217+ if (err)
4218+ break;
4219+
4220+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4221+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4222+ nop == 0x01000000U)
4223+ {
4224+ unsigned int addr;
4225+
4226+ addr = (sethi & 0x003FFFFFU) << 10;
4227+ regs->u_regs[UREG_G1] = addr;
4228+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4229+ regs->pc = addr;
4230+ regs->npc = addr+4;
4231+ return 2;
4232+ }
4233+ } while (0);
4234+
4235+ do { /* PaX: unpatched PLT emulation step 1 */
4236+ unsigned int sethi, ba, nop;
4237+
4238+ err = get_user(sethi, (unsigned int *)regs->pc);
4239+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
4240+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4241+
4242+ if (err)
4243+ break;
4244+
4245+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4246+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4247+ nop == 0x01000000U)
4248+ {
4249+ unsigned int addr, save, call;
4250+
4251+ if ((ba & 0xFFC00000U) == 0x30800000U)
4252+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4253+ else
4254+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4255+
4256+ err = get_user(save, (unsigned int *)addr);
4257+ err |= get_user(call, (unsigned int *)(addr+4));
4258+ err |= get_user(nop, (unsigned int *)(addr+8));
4259+ if (err)
4260+ break;
4261+
4262+#ifdef CONFIG_PAX_DLRESOLVE
4263+ if (save == 0x9DE3BFA8U &&
4264+ (call & 0xC0000000U) == 0x40000000U &&
4265+ nop == 0x01000000U)
4266+ {
4267+ struct vm_area_struct *vma;
4268+ unsigned long call_dl_resolve;
4269+
4270+ down_read(&current->mm->mmap_sem);
4271+ call_dl_resolve = current->mm->call_dl_resolve;
4272+ up_read(&current->mm->mmap_sem);
4273+ if (likely(call_dl_resolve))
4274+ goto emulate;
4275+
4276+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4277+
4278+ down_write(&current->mm->mmap_sem);
4279+ if (current->mm->call_dl_resolve) {
4280+ call_dl_resolve = current->mm->call_dl_resolve;
4281+ up_write(&current->mm->mmap_sem);
4282+ if (vma)
4283+ kmem_cache_free(vm_area_cachep, vma);
4284+ goto emulate;
4285+ }
4286+
4287+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4288+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4289+ up_write(&current->mm->mmap_sem);
4290+ if (vma)
4291+ kmem_cache_free(vm_area_cachep, vma);
4292+ return 1;
4293+ }
4294+
4295+ if (pax_insert_vma(vma, call_dl_resolve)) {
4296+ up_write(&current->mm->mmap_sem);
4297+ kmem_cache_free(vm_area_cachep, vma);
4298+ return 1;
4299+ }
4300+
4301+ current->mm->call_dl_resolve = call_dl_resolve;
4302+ up_write(&current->mm->mmap_sem);
4303+
4304+emulate:
4305+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4306+ regs->pc = call_dl_resolve;
4307+ regs->npc = addr+4;
4308+ return 3;
4309+ }
4310+#endif
4311+
4312+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4313+ if ((save & 0xFFC00000U) == 0x05000000U &&
4314+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4315+ nop == 0x01000000U)
4316+ {
4317+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4318+ regs->u_regs[UREG_G2] = addr + 4;
4319+ addr = (save & 0x003FFFFFU) << 10;
4320+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4321+ regs->pc = addr;
4322+ regs->npc = addr+4;
4323+ return 3;
4324+ }
4325+ }
4326+ } while (0);
4327+
4328+ do { /* PaX: unpatched PLT emulation step 2 */
4329+ unsigned int save, call, nop;
4330+
4331+ err = get_user(save, (unsigned int *)(regs->pc-4));
4332+ err |= get_user(call, (unsigned int *)regs->pc);
4333+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
4334+ if (err)
4335+ break;
4336+
4337+ if (save == 0x9DE3BFA8U &&
4338+ (call & 0xC0000000U) == 0x40000000U &&
4339+ nop == 0x01000000U)
4340+ {
4341+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4342+
4343+ regs->u_regs[UREG_RETPC] = regs->pc;
4344+ regs->pc = dl_resolve;
4345+ regs->npc = dl_resolve+4;
4346+ return 3;
4347+ }
4348+ } while (0);
4349+#endif
4350+
4351+ return 1;
4352+}
4353+
4354+void pax_report_insns(void *pc, void *sp)
4355+{
4356+ unsigned long i;
4357+
4358+ printk(KERN_ERR "PAX: bytes at PC: ");
4359+ for (i = 0; i < 8; i++) {
4360+ unsigned int c;
4361+ if (get_user(c, (unsigned int *)pc+i))
4362+ printk(KERN_CONT "???????? ");
4363+ else
4364+ printk(KERN_CONT "%08x ", c);
4365+ }
4366+ printk("\n");
4367+}
4368+#endif
4369+
4370 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4371 int text_fault)
4372 {
4373@@ -281,6 +546,24 @@ good_area:
4374 if(!(vma->vm_flags & VM_WRITE))
4375 goto bad_area;
4376 } else {
4377+
4378+#ifdef CONFIG_PAX_PAGEEXEC
4379+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4380+ up_read(&mm->mmap_sem);
4381+ switch (pax_handle_fetch_fault(regs)) {
4382+
4383+#ifdef CONFIG_PAX_EMUPLT
4384+ case 2:
4385+ case 3:
4386+ return;
4387+#endif
4388+
4389+ }
4390+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4391+ do_group_exit(SIGKILL);
4392+ }
4393+#endif
4394+
4395 /* Allow reads even for write-only mappings */
4396 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4397 goto bad_area;
4398diff -urNp linux-3.0.4/arch/sparc/mm/fault_64.c linux-3.0.4/arch/sparc/mm/fault_64.c
4399--- linux-3.0.4/arch/sparc/mm/fault_64.c 2011-07-21 22:17:23.000000000 -0400
4400+++ linux-3.0.4/arch/sparc/mm/fault_64.c 2011-08-23 21:48:14.000000000 -0400
4401@@ -21,6 +21,9 @@
4402 #include <linux/kprobes.h>
4403 #include <linux/kdebug.h>
4404 #include <linux/percpu.h>
4405+#include <linux/slab.h>
4406+#include <linux/pagemap.h>
4407+#include <linux/compiler.h>
4408
4409 #include <asm/page.h>
4410 #include <asm/pgtable.h>
4411@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(stru
4412 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4413 regs->tpc);
4414 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4415- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4416+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4417 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4418 dump_stack();
4419 unhandled_fault(regs->tpc, current, regs);
4420@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
4421 show_regs(regs);
4422 }
4423
4424+#ifdef CONFIG_PAX_PAGEEXEC
4425+#ifdef CONFIG_PAX_DLRESOLVE
4426+static void pax_emuplt_close(struct vm_area_struct *vma)
4427+{
4428+ vma->vm_mm->call_dl_resolve = 0UL;
4429+}
4430+
4431+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4432+{
4433+ unsigned int *kaddr;
4434+
4435+ vmf->page = alloc_page(GFP_HIGHUSER);
4436+ if (!vmf->page)
4437+ return VM_FAULT_OOM;
4438+
4439+ kaddr = kmap(vmf->page);
4440+ memset(kaddr, 0, PAGE_SIZE);
4441+ kaddr[0] = 0x9DE3BFA8U; /* save */
4442+ flush_dcache_page(vmf->page);
4443+ kunmap(vmf->page);
4444+ return VM_FAULT_MAJOR;
4445+}
4446+
4447+static const struct vm_operations_struct pax_vm_ops = {
4448+ .close = pax_emuplt_close,
4449+ .fault = pax_emuplt_fault
4450+};
4451+
4452+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4453+{
4454+ int ret;
4455+
4456+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4457+ vma->vm_mm = current->mm;
4458+ vma->vm_start = addr;
4459+ vma->vm_end = addr + PAGE_SIZE;
4460+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4461+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4462+ vma->vm_ops = &pax_vm_ops;
4463+
4464+ ret = insert_vm_struct(current->mm, vma);
4465+ if (ret)
4466+ return ret;
4467+
4468+ ++current->mm->total_vm;
4469+ return 0;
4470+}
4471+#endif
4472+
4473+/*
4474+ * PaX: decide what to do with offenders (regs->tpc = fault address)
4475+ *
4476+ * returns 1 when task should be killed
4477+ * 2 when patched PLT trampoline was detected
4478+ * 3 when unpatched PLT trampoline was detected
4479+ */
4480+static int pax_handle_fetch_fault(struct pt_regs *regs)
4481+{
4482+
4483+#ifdef CONFIG_PAX_EMUPLT
4484+ int err;
4485+
4486+ do { /* PaX: patched PLT emulation #1 */
4487+ unsigned int sethi1, sethi2, jmpl;
4488+
4489+ err = get_user(sethi1, (unsigned int *)regs->tpc);
4490+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4491+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4492+
4493+ if (err)
4494+ break;
4495+
4496+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4497+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4498+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4499+ {
4500+ unsigned long addr;
4501+
4502+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4503+ addr = regs->u_regs[UREG_G1];
4504+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4505+
4506+ if (test_thread_flag(TIF_32BIT))
4507+ addr &= 0xFFFFFFFFUL;
4508+
4509+ regs->tpc = addr;
4510+ regs->tnpc = addr+4;
4511+ return 2;
4512+ }
4513+ } while (0);
4514+
4515+ { /* PaX: patched PLT emulation #2 */
4516+ unsigned int ba;
4517+
4518+ err = get_user(ba, (unsigned int *)regs->tpc);
4519+
4520+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4521+ unsigned long addr;
4522+
4523+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4524+
4525+ if (test_thread_flag(TIF_32BIT))
4526+ addr &= 0xFFFFFFFFUL;
4527+
4528+ regs->tpc = addr;
4529+ regs->tnpc = addr+4;
4530+ return 2;
4531+ }
4532+ }
4533+
4534+ do { /* PaX: patched PLT emulation #3 */
4535+ unsigned int sethi, jmpl, nop;
4536+
4537+ err = get_user(sethi, (unsigned int *)regs->tpc);
4538+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
4539+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4540+
4541+ if (err)
4542+ break;
4543+
4544+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4545+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4546+ nop == 0x01000000U)
4547+ {
4548+ unsigned long addr;
4549+
4550+ addr = (sethi & 0x003FFFFFU) << 10;
4551+ regs->u_regs[UREG_G1] = addr;
4552+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4553+
4554+ if (test_thread_flag(TIF_32BIT))
4555+ addr &= 0xFFFFFFFFUL;
4556+
4557+ regs->tpc = addr;
4558+ regs->tnpc = addr+4;
4559+ return 2;
4560+ }
4561+ } while (0);
4562+
4563+ do { /* PaX: patched PLT emulation #4 */
4564+ unsigned int sethi, mov1, call, mov2;
4565+
4566+ err = get_user(sethi, (unsigned int *)regs->tpc);
4567+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
4568+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
4569+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
4570+
4571+ if (err)
4572+ break;
4573+
4574+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4575+ mov1 == 0x8210000FU &&
4576+ (call & 0xC0000000U) == 0x40000000U &&
4577+ mov2 == 0x9E100001U)
4578+ {
4579+ unsigned long addr;
4580+
4581+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
4582+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4583+
4584+ if (test_thread_flag(TIF_32BIT))
4585+ addr &= 0xFFFFFFFFUL;
4586+
4587+ regs->tpc = addr;
4588+ regs->tnpc = addr+4;
4589+ return 2;
4590+ }
4591+ } while (0);
4592+
4593+ do { /* PaX: patched PLT emulation #5 */
4594+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
4595+
4596+ err = get_user(sethi, (unsigned int *)regs->tpc);
4597+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4598+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4599+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
4600+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
4601+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
4602+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
4603+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
4604+
4605+ if (err)
4606+ break;
4607+
4608+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4609+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
4610+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4611+ (or1 & 0xFFFFE000U) == 0x82106000U &&
4612+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
4613+ sllx == 0x83287020U &&
4614+ jmpl == 0x81C04005U &&
4615+ nop == 0x01000000U)
4616+ {
4617+ unsigned long addr;
4618+
4619+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4620+ regs->u_regs[UREG_G1] <<= 32;
4621+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4622+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4623+ regs->tpc = addr;
4624+ regs->tnpc = addr+4;
4625+ return 2;
4626+ }
4627+ } while (0);
4628+
4629+ do { /* PaX: patched PLT emulation #6 */
4630+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
4631+
4632+ err = get_user(sethi, (unsigned int *)regs->tpc);
4633+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4634+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4635+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
4636+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
4637+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
4638+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
4639+
4640+ if (err)
4641+ break;
4642+
4643+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4644+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
4645+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4646+ sllx == 0x83287020U &&
4647+ (or & 0xFFFFE000U) == 0x8A116000U &&
4648+ jmpl == 0x81C04005U &&
4649+ nop == 0x01000000U)
4650+ {
4651+ unsigned long addr;
4652+
4653+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
4654+ regs->u_regs[UREG_G1] <<= 32;
4655+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
4656+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4657+ regs->tpc = addr;
4658+ regs->tnpc = addr+4;
4659+ return 2;
4660+ }
4661+ } while (0);
4662+
4663+ do { /* PaX: unpatched PLT emulation step 1 */
4664+ unsigned int sethi, ba, nop;
4665+
4666+ err = get_user(sethi, (unsigned int *)regs->tpc);
4667+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4668+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4669+
4670+ if (err)
4671+ break;
4672+
4673+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4674+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4675+ nop == 0x01000000U)
4676+ {
4677+ unsigned long addr;
4678+ unsigned int save, call;
4679+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
4680+
4681+ if ((ba & 0xFFC00000U) == 0x30800000U)
4682+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4683+ else
4684+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4685+
4686+ if (test_thread_flag(TIF_32BIT))
4687+ addr &= 0xFFFFFFFFUL;
4688+
4689+ err = get_user(save, (unsigned int *)addr);
4690+ err |= get_user(call, (unsigned int *)(addr+4));
4691+ err |= get_user(nop, (unsigned int *)(addr+8));
4692+ if (err)
4693+ break;
4694+
4695+#ifdef CONFIG_PAX_DLRESOLVE
4696+ if (save == 0x9DE3BFA8U &&
4697+ (call & 0xC0000000U) == 0x40000000U &&
4698+ nop == 0x01000000U)
4699+ {
4700+ struct vm_area_struct *vma;
4701+ unsigned long call_dl_resolve;
4702+
4703+ down_read(&current->mm->mmap_sem);
4704+ call_dl_resolve = current->mm->call_dl_resolve;
4705+ up_read(&current->mm->mmap_sem);
4706+ if (likely(call_dl_resolve))
4707+ goto emulate;
4708+
4709+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4710+
4711+ down_write(&current->mm->mmap_sem);
4712+ if (current->mm->call_dl_resolve) {
4713+ call_dl_resolve = current->mm->call_dl_resolve;
4714+ up_write(&current->mm->mmap_sem);
4715+ if (vma)
4716+ kmem_cache_free(vm_area_cachep, vma);
4717+ goto emulate;
4718+ }
4719+
4720+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4721+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4722+ up_write(&current->mm->mmap_sem);
4723+ if (vma)
4724+ kmem_cache_free(vm_area_cachep, vma);
4725+ return 1;
4726+ }
4727+
4728+ if (pax_insert_vma(vma, call_dl_resolve)) {
4729+ up_write(&current->mm->mmap_sem);
4730+ kmem_cache_free(vm_area_cachep, vma);
4731+ return 1;
4732+ }
4733+
4734+ current->mm->call_dl_resolve = call_dl_resolve;
4735+ up_write(&current->mm->mmap_sem);
4736+
4737+emulate:
4738+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4739+ regs->tpc = call_dl_resolve;
4740+ regs->tnpc = addr+4;
4741+ return 3;
4742+ }
4743+#endif
4744+
4745+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4746+ if ((save & 0xFFC00000U) == 0x05000000U &&
4747+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4748+ nop == 0x01000000U)
4749+ {
4750+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4751+ regs->u_regs[UREG_G2] = addr + 4;
4752+ addr = (save & 0x003FFFFFU) << 10;
4753+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4754+
4755+ if (test_thread_flag(TIF_32BIT))
4756+ addr &= 0xFFFFFFFFUL;
4757+
4758+ regs->tpc = addr;
4759+ regs->tnpc = addr+4;
4760+ return 3;
4761+ }
4762+
4763+ /* PaX: 64-bit PLT stub */
4764+ err = get_user(sethi1, (unsigned int *)addr);
4765+ err |= get_user(sethi2, (unsigned int *)(addr+4));
4766+ err |= get_user(or1, (unsigned int *)(addr+8));
4767+ err |= get_user(or2, (unsigned int *)(addr+12));
4768+ err |= get_user(sllx, (unsigned int *)(addr+16));
4769+ err |= get_user(add, (unsigned int *)(addr+20));
4770+ err |= get_user(jmpl, (unsigned int *)(addr+24));
4771+ err |= get_user(nop, (unsigned int *)(addr+28));
4772+ if (err)
4773+ break;
4774+
4775+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
4776+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4777+ (or1 & 0xFFFFE000U) == 0x88112000U &&
4778+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
4779+ sllx == 0x89293020U &&
4780+ add == 0x8A010005U &&
4781+ jmpl == 0x89C14000U &&
4782+ nop == 0x01000000U)
4783+ {
4784+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4785+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4786+ regs->u_regs[UREG_G4] <<= 32;
4787+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4788+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
4789+ regs->u_regs[UREG_G4] = addr + 24;
4790+ addr = regs->u_regs[UREG_G5];
4791+ regs->tpc = addr;
4792+ regs->tnpc = addr+4;
4793+ return 3;
4794+ }
4795+ }
4796+ } while (0);
4797+
4798+#ifdef CONFIG_PAX_DLRESOLVE
4799+ do { /* PaX: unpatched PLT emulation step 2 */
4800+ unsigned int save, call, nop;
4801+
4802+ err = get_user(save, (unsigned int *)(regs->tpc-4));
4803+ err |= get_user(call, (unsigned int *)regs->tpc);
4804+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
4805+ if (err)
4806+ break;
4807+
4808+ if (save == 0x9DE3BFA8U &&
4809+ (call & 0xC0000000U) == 0x40000000U &&
4810+ nop == 0x01000000U)
4811+ {
4812+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4813+
4814+ if (test_thread_flag(TIF_32BIT))
4815+ dl_resolve &= 0xFFFFFFFFUL;
4816+
4817+ regs->u_regs[UREG_RETPC] = regs->tpc;
4818+ regs->tpc = dl_resolve;
4819+ regs->tnpc = dl_resolve+4;
4820+ return 3;
4821+ }
4822+ } while (0);
4823+#endif
4824+
4825+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
4826+ unsigned int sethi, ba, nop;
4827+
4828+ err = get_user(sethi, (unsigned int *)regs->tpc);
4829+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4830+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4831+
4832+ if (err)
4833+ break;
4834+
4835+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4836+ (ba & 0xFFF00000U) == 0x30600000U &&
4837+ nop == 0x01000000U)
4838+ {
4839+ unsigned long addr;
4840+
4841+ addr = (sethi & 0x003FFFFFU) << 10;
4842+ regs->u_regs[UREG_G1] = addr;
4843+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4844+
4845+ if (test_thread_flag(TIF_32BIT))
4846+ addr &= 0xFFFFFFFFUL;
4847+
4848+ regs->tpc = addr;
4849+ regs->tnpc = addr+4;
4850+ return 2;
4851+ }
4852+ } while (0);
4853+
4854+#endif
4855+
4856+ return 1;
4857+}
4858+
4859+void pax_report_insns(void *pc, void *sp)
4860+{
4861+ unsigned long i;
4862+
4863+ printk(KERN_ERR "PAX: bytes at PC: ");
4864+ for (i = 0; i < 8; i++) {
4865+ unsigned int c;
4866+ if (get_user(c, (unsigned int *)pc+i))
4867+ printk(KERN_CONT "???????? ");
4868+ else
4869+ printk(KERN_CONT "%08x ", c);
4870+ }
4871+ printk("\n");
4872+}
4873+#endif
4874+
4875 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
4876 {
4877 struct mm_struct *mm = current->mm;
4878@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
4879 if (!vma)
4880 goto bad_area;
4881
4882+#ifdef CONFIG_PAX_PAGEEXEC
4883+ /* PaX: detect ITLB misses on non-exec pages */
4884+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
4885+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
4886+ {
4887+ if (address != regs->tpc)
4888+ goto good_area;
4889+
4890+ up_read(&mm->mmap_sem);
4891+ switch (pax_handle_fetch_fault(regs)) {
4892+
4893+#ifdef CONFIG_PAX_EMUPLT
4894+ case 2:
4895+ case 3:
4896+ return;
4897+#endif
4898+
4899+ }
4900+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
4901+ do_group_exit(SIGKILL);
4902+ }
4903+#endif
4904+
4905 /* Pure DTLB misses do not tell us whether the fault causing
4906 * load/store/atomic was a write or not, it only says that there
4907 * was no match. So in such a case we (carefully) read the
4908diff -urNp linux-3.0.4/arch/sparc/mm/hugetlbpage.c linux-3.0.4/arch/sparc/mm/hugetlbpage.c
4909--- linux-3.0.4/arch/sparc/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
4910+++ linux-3.0.4/arch/sparc/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
4911@@ -68,7 +68,7 @@ full_search:
4912 }
4913 return -ENOMEM;
4914 }
4915- if (likely(!vma || addr + len <= vma->vm_start)) {
4916+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4917 /*
4918 * Remember the place where we stopped the search:
4919 */
4920@@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
4921 /* make sure it can fit in the remaining address space */
4922 if (likely(addr > len)) {
4923 vma = find_vma(mm, addr-len);
4924- if (!vma || addr <= vma->vm_start) {
4925+ if (check_heap_stack_gap(vma, addr - len, len)) {
4926 /* remember the address as a hint for next time */
4927 return (mm->free_area_cache = addr-len);
4928 }
4929@@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct
4930 if (unlikely(mm->mmap_base < len))
4931 goto bottomup;
4932
4933- addr = (mm->mmap_base-len) & HPAGE_MASK;
4934+ addr = mm->mmap_base - len;
4935
4936 do {
4937+ addr &= HPAGE_MASK;
4938 /*
4939 * Lookup failure means no vma is above this address,
4940 * else if new region fits below vma->vm_start,
4941 * return with success:
4942 */
4943 vma = find_vma(mm, addr);
4944- if (likely(!vma || addr+len <= vma->vm_start)) {
4945+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4946 /* remember the address as a hint for next time */
4947 return (mm->free_area_cache = addr);
4948 }
4949@@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct
4950 mm->cached_hole_size = vma->vm_start - addr;
4951
4952 /* try just below the current vma->vm_start */
4953- addr = (vma->vm_start-len) & HPAGE_MASK;
4954- } while (likely(len < vma->vm_start));
4955+ addr = skip_heap_stack_gap(vma, len);
4956+ } while (!IS_ERR_VALUE(addr));
4957
4958 bottomup:
4959 /*
4960@@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f
4961 if (addr) {
4962 addr = ALIGN(addr, HPAGE_SIZE);
4963 vma = find_vma(mm, addr);
4964- if (task_size - len >= addr &&
4965- (!vma || addr + len <= vma->vm_start))
4966+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4967 return addr;
4968 }
4969 if (mm->get_unmapped_area == arch_get_unmapped_area)
4970diff -urNp linux-3.0.4/arch/sparc/mm/init_32.c linux-3.0.4/arch/sparc/mm/init_32.c
4971--- linux-3.0.4/arch/sparc/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
4972+++ linux-3.0.4/arch/sparc/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
4973@@ -316,6 +316,9 @@ extern void device_scan(void);
4974 pgprot_t PAGE_SHARED __read_mostly;
4975 EXPORT_SYMBOL(PAGE_SHARED);
4976
4977+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
4978+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
4979+
4980 void __init paging_init(void)
4981 {
4982 switch(sparc_cpu_model) {
4983@@ -344,17 +347,17 @@ void __init paging_init(void)
4984
4985 /* Initialize the protection map with non-constant, MMU dependent values. */
4986 protection_map[0] = PAGE_NONE;
4987- protection_map[1] = PAGE_READONLY;
4988- protection_map[2] = PAGE_COPY;
4989- protection_map[3] = PAGE_COPY;
4990+ protection_map[1] = PAGE_READONLY_NOEXEC;
4991+ protection_map[2] = PAGE_COPY_NOEXEC;
4992+ protection_map[3] = PAGE_COPY_NOEXEC;
4993 protection_map[4] = PAGE_READONLY;
4994 protection_map[5] = PAGE_READONLY;
4995 protection_map[6] = PAGE_COPY;
4996 protection_map[7] = PAGE_COPY;
4997 protection_map[8] = PAGE_NONE;
4998- protection_map[9] = PAGE_READONLY;
4999- protection_map[10] = PAGE_SHARED;
5000- protection_map[11] = PAGE_SHARED;
5001+ protection_map[9] = PAGE_READONLY_NOEXEC;
5002+ protection_map[10] = PAGE_SHARED_NOEXEC;
5003+ protection_map[11] = PAGE_SHARED_NOEXEC;
5004 protection_map[12] = PAGE_READONLY;
5005 protection_map[13] = PAGE_READONLY;
5006 protection_map[14] = PAGE_SHARED;
5007diff -urNp linux-3.0.4/arch/sparc/mm/Makefile linux-3.0.4/arch/sparc/mm/Makefile
5008--- linux-3.0.4/arch/sparc/mm/Makefile 2011-07-21 22:17:23.000000000 -0400
5009+++ linux-3.0.4/arch/sparc/mm/Makefile 2011-08-23 21:47:55.000000000 -0400
5010@@ -2,7 +2,7 @@
5011 #
5012
5013 asflags-y := -ansi
5014-ccflags-y := -Werror
5015+#ccflags-y := -Werror
5016
5017 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5018 obj-y += fault_$(BITS).o
5019diff -urNp linux-3.0.4/arch/sparc/mm/srmmu.c linux-3.0.4/arch/sparc/mm/srmmu.c
5020--- linux-3.0.4/arch/sparc/mm/srmmu.c 2011-07-21 22:17:23.000000000 -0400
5021+++ linux-3.0.4/arch/sparc/mm/srmmu.c 2011-08-23 21:47:55.000000000 -0400
5022@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5023 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5024 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5025 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5026+
5027+#ifdef CONFIG_PAX_PAGEEXEC
5028+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5029+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5030+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5031+#endif
5032+
5033 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5034 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5035
5036diff -urNp linux-3.0.4/arch/um/include/asm/kmap_types.h linux-3.0.4/arch/um/include/asm/kmap_types.h
5037--- linux-3.0.4/arch/um/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
5038+++ linux-3.0.4/arch/um/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
5039@@ -23,6 +23,7 @@ enum km_type {
5040 KM_IRQ1,
5041 KM_SOFTIRQ0,
5042 KM_SOFTIRQ1,
5043+ KM_CLEARPAGE,
5044 KM_TYPE_NR
5045 };
5046
5047diff -urNp linux-3.0.4/arch/um/include/asm/page.h linux-3.0.4/arch/um/include/asm/page.h
5048--- linux-3.0.4/arch/um/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
5049+++ linux-3.0.4/arch/um/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
5050@@ -14,6 +14,9 @@
5051 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5052 #define PAGE_MASK (~(PAGE_SIZE-1))
5053
5054+#define ktla_ktva(addr) (addr)
5055+#define ktva_ktla(addr) (addr)
5056+
5057 #ifndef __ASSEMBLY__
5058
5059 struct page;
5060diff -urNp linux-3.0.4/arch/um/kernel/process.c linux-3.0.4/arch/um/kernel/process.c
5061--- linux-3.0.4/arch/um/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
5062+++ linux-3.0.4/arch/um/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
5063@@ -404,22 +404,6 @@ int singlestepping(void * t)
5064 return 2;
5065 }
5066
5067-/*
5068- * Only x86 and x86_64 have an arch_align_stack().
5069- * All other arches have "#define arch_align_stack(x) (x)"
5070- * in their asm/system.h
5071- * As this is included in UML from asm-um/system-generic.h,
5072- * we can use it to behave as the subarch does.
5073- */
5074-#ifndef arch_align_stack
5075-unsigned long arch_align_stack(unsigned long sp)
5076-{
5077- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5078- sp -= get_random_int() % 8192;
5079- return sp & ~0xf;
5080-}
5081-#endif
5082-
5083 unsigned long get_wchan(struct task_struct *p)
5084 {
5085 unsigned long stack_page, sp, ip;
5086diff -urNp linux-3.0.4/arch/um/sys-i386/syscalls.c linux-3.0.4/arch/um/sys-i386/syscalls.c
5087--- linux-3.0.4/arch/um/sys-i386/syscalls.c 2011-07-21 22:17:23.000000000 -0400
5088+++ linux-3.0.4/arch/um/sys-i386/syscalls.c 2011-08-23 21:47:55.000000000 -0400
5089@@ -11,6 +11,21 @@
5090 #include "asm/uaccess.h"
5091 #include "asm/unistd.h"
5092
5093+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5094+{
5095+ unsigned long pax_task_size = TASK_SIZE;
5096+
5097+#ifdef CONFIG_PAX_SEGMEXEC
5098+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5099+ pax_task_size = SEGMEXEC_TASK_SIZE;
5100+#endif
5101+
5102+ if (len > pax_task_size || addr > pax_task_size - len)
5103+ return -EINVAL;
5104+
5105+ return 0;
5106+}
5107+
5108 /*
5109 * The prototype on i386 is:
5110 *
5111diff -urNp linux-3.0.4/arch/x86/boot/bitops.h linux-3.0.4/arch/x86/boot/bitops.h
5112--- linux-3.0.4/arch/x86/boot/bitops.h 2011-07-21 22:17:23.000000000 -0400
5113+++ linux-3.0.4/arch/x86/boot/bitops.h 2011-08-23 21:47:55.000000000 -0400
5114@@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5115 u8 v;
5116 const u32 *p = (const u32 *)addr;
5117
5118- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5119+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5120 return v;
5121 }
5122
5123@@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5124
5125 static inline void set_bit(int nr, void *addr)
5126 {
5127- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5128+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5129 }
5130
5131 #endif /* BOOT_BITOPS_H */
5132diff -urNp linux-3.0.4/arch/x86/boot/boot.h linux-3.0.4/arch/x86/boot/boot.h
5133--- linux-3.0.4/arch/x86/boot/boot.h 2011-07-21 22:17:23.000000000 -0400
5134+++ linux-3.0.4/arch/x86/boot/boot.h 2011-08-23 21:47:55.000000000 -0400
5135@@ -85,7 +85,7 @@ static inline void io_delay(void)
5136 static inline u16 ds(void)
5137 {
5138 u16 seg;
5139- asm("movw %%ds,%0" : "=rm" (seg));
5140+ asm volatile("movw %%ds,%0" : "=rm" (seg));
5141 return seg;
5142 }
5143
5144@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t
5145 static inline int memcmp(const void *s1, const void *s2, size_t len)
5146 {
5147 u8 diff;
5148- asm("repe; cmpsb; setnz %0"
5149+ asm volatile("repe; cmpsb; setnz %0"
5150 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5151 return diff;
5152 }
5153diff -urNp linux-3.0.4/arch/x86/boot/compressed/head_32.S linux-3.0.4/arch/x86/boot/compressed/head_32.S
5154--- linux-3.0.4/arch/x86/boot/compressed/head_32.S 2011-07-21 22:17:23.000000000 -0400
5155+++ linux-3.0.4/arch/x86/boot/compressed/head_32.S 2011-08-23 21:47:55.000000000 -0400
5156@@ -76,7 +76,7 @@ ENTRY(startup_32)
5157 notl %eax
5158 andl %eax, %ebx
5159 #else
5160- movl $LOAD_PHYSICAL_ADDR, %ebx
5161+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5162 #endif
5163
5164 /* Target address to relocate to for decompression */
5165@@ -162,7 +162,7 @@ relocated:
5166 * and where it was actually loaded.
5167 */
5168 movl %ebp, %ebx
5169- subl $LOAD_PHYSICAL_ADDR, %ebx
5170+ subl $____LOAD_PHYSICAL_ADDR, %ebx
5171 jz 2f /* Nothing to be done if loaded at compiled addr. */
5172 /*
5173 * Process relocations.
5174@@ -170,8 +170,7 @@ relocated:
5175
5176 1: subl $4, %edi
5177 movl (%edi), %ecx
5178- testl %ecx, %ecx
5179- jz 2f
5180+ jecxz 2f
5181 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5182 jmp 1b
5183 2:
5184diff -urNp linux-3.0.4/arch/x86/boot/compressed/head_64.S linux-3.0.4/arch/x86/boot/compressed/head_64.S
5185--- linux-3.0.4/arch/x86/boot/compressed/head_64.S 2011-07-21 22:17:23.000000000 -0400
5186+++ linux-3.0.4/arch/x86/boot/compressed/head_64.S 2011-08-23 21:47:55.000000000 -0400
5187@@ -91,7 +91,7 @@ ENTRY(startup_32)
5188 notl %eax
5189 andl %eax, %ebx
5190 #else
5191- movl $LOAD_PHYSICAL_ADDR, %ebx
5192+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5193 #endif
5194
5195 /* Target address to relocate to for decompression */
5196@@ -233,7 +233,7 @@ ENTRY(startup_64)
5197 notq %rax
5198 andq %rax, %rbp
5199 #else
5200- movq $LOAD_PHYSICAL_ADDR, %rbp
5201+ movq $____LOAD_PHYSICAL_ADDR, %rbp
5202 #endif
5203
5204 /* Target address to relocate to for decompression */
5205diff -urNp linux-3.0.4/arch/x86/boot/compressed/Makefile linux-3.0.4/arch/x86/boot/compressed/Makefile
5206--- linux-3.0.4/arch/x86/boot/compressed/Makefile 2011-07-21 22:17:23.000000000 -0400
5207+++ linux-3.0.4/arch/x86/boot/compressed/Makefile 2011-08-23 21:47:55.000000000 -0400
5208@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
5209 KBUILD_CFLAGS += $(cflags-y)
5210 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5211 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5212+ifdef CONSTIFY_PLUGIN
5213+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5214+endif
5215
5216 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5217 GCOV_PROFILE := n
5218diff -urNp linux-3.0.4/arch/x86/boot/compressed/misc.c linux-3.0.4/arch/x86/boot/compressed/misc.c
5219--- linux-3.0.4/arch/x86/boot/compressed/misc.c 2011-07-21 22:17:23.000000000 -0400
5220+++ linux-3.0.4/arch/x86/boot/compressed/misc.c 2011-08-23 21:47:55.000000000 -0400
5221@@ -310,7 +310,7 @@ static void parse_elf(void *output)
5222 case PT_LOAD:
5223 #ifdef CONFIG_RELOCATABLE
5224 dest = output;
5225- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5226+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5227 #else
5228 dest = (void *)(phdr->p_paddr);
5229 #endif
5230@@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *
5231 error("Destination address too large");
5232 #endif
5233 #ifndef CONFIG_RELOCATABLE
5234- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5235+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5236 error("Wrong destination address");
5237 #endif
5238
5239diff -urNp linux-3.0.4/arch/x86/boot/compressed/relocs.c linux-3.0.4/arch/x86/boot/compressed/relocs.c
5240--- linux-3.0.4/arch/x86/boot/compressed/relocs.c 2011-07-21 22:17:23.000000000 -0400
5241+++ linux-3.0.4/arch/x86/boot/compressed/relocs.c 2011-08-23 21:47:55.000000000 -0400
5242@@ -13,8 +13,11 @@
5243
5244 static void die(char *fmt, ...);
5245
5246+#include "../../../../include/generated/autoconf.h"
5247+
5248 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5249 static Elf32_Ehdr ehdr;
5250+static Elf32_Phdr *phdr;
5251 static unsigned long reloc_count, reloc_idx;
5252 static unsigned long *relocs;
5253
5254@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5255 }
5256 }
5257
5258+static void read_phdrs(FILE *fp)
5259+{
5260+ unsigned int i;
5261+
5262+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5263+ if (!phdr) {
5264+ die("Unable to allocate %d program headers\n",
5265+ ehdr.e_phnum);
5266+ }
5267+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5268+ die("Seek to %d failed: %s\n",
5269+ ehdr.e_phoff, strerror(errno));
5270+ }
5271+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5272+ die("Cannot read ELF program headers: %s\n",
5273+ strerror(errno));
5274+ }
5275+ for(i = 0; i < ehdr.e_phnum; i++) {
5276+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5277+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5278+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5279+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5280+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5281+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5282+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5283+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5284+ }
5285+
5286+}
5287+
5288 static void read_shdrs(FILE *fp)
5289 {
5290- int i;
5291+ unsigned int i;
5292 Elf32_Shdr shdr;
5293
5294 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5295@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5296
5297 static void read_strtabs(FILE *fp)
5298 {
5299- int i;
5300+ unsigned int i;
5301 for (i = 0; i < ehdr.e_shnum; i++) {
5302 struct section *sec = &secs[i];
5303 if (sec->shdr.sh_type != SHT_STRTAB) {
5304@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5305
5306 static void read_symtabs(FILE *fp)
5307 {
5308- int i,j;
5309+ unsigned int i,j;
5310 for (i = 0; i < ehdr.e_shnum; i++) {
5311 struct section *sec = &secs[i];
5312 if (sec->shdr.sh_type != SHT_SYMTAB) {
5313@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5314
5315 static void read_relocs(FILE *fp)
5316 {
5317- int i,j;
5318+ unsigned int i,j;
5319+ uint32_t base;
5320+
5321 for (i = 0; i < ehdr.e_shnum; i++) {
5322 struct section *sec = &secs[i];
5323 if (sec->shdr.sh_type != SHT_REL) {
5324@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5325 die("Cannot read symbol table: %s\n",
5326 strerror(errno));
5327 }
5328+ base = 0;
5329+ for (j = 0; j < ehdr.e_phnum; j++) {
5330+ if (phdr[j].p_type != PT_LOAD )
5331+ continue;
5332+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5333+ continue;
5334+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5335+ break;
5336+ }
5337 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5338 Elf32_Rel *rel = &sec->reltab[j];
5339- rel->r_offset = elf32_to_cpu(rel->r_offset);
5340+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5341 rel->r_info = elf32_to_cpu(rel->r_info);
5342 }
5343 }
5344@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5345
5346 static void print_absolute_symbols(void)
5347 {
5348- int i;
5349+ unsigned int i;
5350 printf("Absolute symbols\n");
5351 printf(" Num: Value Size Type Bind Visibility Name\n");
5352 for (i = 0; i < ehdr.e_shnum; i++) {
5353 struct section *sec = &secs[i];
5354 char *sym_strtab;
5355 Elf32_Sym *sh_symtab;
5356- int j;
5357+ unsigned int j;
5358
5359 if (sec->shdr.sh_type != SHT_SYMTAB) {
5360 continue;
5361@@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5362
5363 static void print_absolute_relocs(void)
5364 {
5365- int i, printed = 0;
5366+ unsigned int i, printed = 0;
5367
5368 for (i = 0; i < ehdr.e_shnum; i++) {
5369 struct section *sec = &secs[i];
5370 struct section *sec_applies, *sec_symtab;
5371 char *sym_strtab;
5372 Elf32_Sym *sh_symtab;
5373- int j;
5374+ unsigned int j;
5375 if (sec->shdr.sh_type != SHT_REL) {
5376 continue;
5377 }
5378@@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
5379
5380 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
5381 {
5382- int i;
5383+ unsigned int i;
5384 /* Walk through the relocations */
5385 for (i = 0; i < ehdr.e_shnum; i++) {
5386 char *sym_strtab;
5387 Elf32_Sym *sh_symtab;
5388 struct section *sec_applies, *sec_symtab;
5389- int j;
5390+ unsigned int j;
5391 struct section *sec = &secs[i];
5392
5393 if (sec->shdr.sh_type != SHT_REL) {
5394@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
5395 !is_rel_reloc(sym_name(sym_strtab, sym))) {
5396 continue;
5397 }
5398+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
5399+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
5400+ continue;
5401+
5402+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
5403+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
5404+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
5405+ continue;
5406+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
5407+ continue;
5408+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
5409+ continue;
5410+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
5411+ continue;
5412+#endif
5413+
5414 switch (r_type) {
5415 case R_386_NONE:
5416 case R_386_PC32:
5417@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
5418
5419 static void emit_relocs(int as_text)
5420 {
5421- int i;
5422+ unsigned int i;
5423 /* Count how many relocations I have and allocate space for them. */
5424 reloc_count = 0;
5425 walk_relocs(count_reloc);
5426@@ -665,6 +725,7 @@ int main(int argc, char **argv)
5427 fname, strerror(errno));
5428 }
5429 read_ehdr(fp);
5430+ read_phdrs(fp);
5431 read_shdrs(fp);
5432 read_strtabs(fp);
5433 read_symtabs(fp);
5434diff -urNp linux-3.0.4/arch/x86/boot/cpucheck.c linux-3.0.4/arch/x86/boot/cpucheck.c
5435--- linux-3.0.4/arch/x86/boot/cpucheck.c 2011-07-21 22:17:23.000000000 -0400
5436+++ linux-3.0.4/arch/x86/boot/cpucheck.c 2011-08-23 21:47:55.000000000 -0400
5437@@ -74,7 +74,7 @@ static int has_fpu(void)
5438 u16 fcw = -1, fsw = -1;
5439 u32 cr0;
5440
5441- asm("movl %%cr0,%0" : "=r" (cr0));
5442+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
5443 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
5444 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
5445 asm volatile("movl %0,%%cr0" : : "r" (cr0));
5446@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
5447 {
5448 u32 f0, f1;
5449
5450- asm("pushfl ; "
5451+ asm volatile("pushfl ; "
5452 "pushfl ; "
5453 "popl %0 ; "
5454 "movl %0,%1 ; "
5455@@ -115,7 +115,7 @@ static void get_flags(void)
5456 set_bit(X86_FEATURE_FPU, cpu.flags);
5457
5458 if (has_eflag(X86_EFLAGS_ID)) {
5459- asm("cpuid"
5460+ asm volatile("cpuid"
5461 : "=a" (max_intel_level),
5462 "=b" (cpu_vendor[0]),
5463 "=d" (cpu_vendor[1]),
5464@@ -124,7 +124,7 @@ static void get_flags(void)
5465
5466 if (max_intel_level >= 0x00000001 &&
5467 max_intel_level <= 0x0000ffff) {
5468- asm("cpuid"
5469+ asm volatile("cpuid"
5470 : "=a" (tfms),
5471 "=c" (cpu.flags[4]),
5472 "=d" (cpu.flags[0])
5473@@ -136,7 +136,7 @@ static void get_flags(void)
5474 cpu.model += ((tfms >> 16) & 0xf) << 4;
5475 }
5476
5477- asm("cpuid"
5478+ asm volatile("cpuid"
5479 : "=a" (max_amd_level)
5480 : "a" (0x80000000)
5481 : "ebx", "ecx", "edx");
5482@@ -144,7 +144,7 @@ static void get_flags(void)
5483 if (max_amd_level >= 0x80000001 &&
5484 max_amd_level <= 0x8000ffff) {
5485 u32 eax = 0x80000001;
5486- asm("cpuid"
5487+ asm volatile("cpuid"
5488 : "+a" (eax),
5489 "=c" (cpu.flags[6]),
5490 "=d" (cpu.flags[1])
5491@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5492 u32 ecx = MSR_K7_HWCR;
5493 u32 eax, edx;
5494
5495- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5496+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5497 eax &= ~(1 << 15);
5498- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5499+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5500
5501 get_flags(); /* Make sure it really did something */
5502 err = check_flags();
5503@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5504 u32 ecx = MSR_VIA_FCR;
5505 u32 eax, edx;
5506
5507- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5508+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5509 eax |= (1<<1)|(1<<7);
5510- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5511+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5512
5513 set_bit(X86_FEATURE_CX8, cpu.flags);
5514 err = check_flags();
5515@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
5516 u32 eax, edx;
5517 u32 level = 1;
5518
5519- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5520- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5521- asm("cpuid"
5522+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5523+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5524+ asm volatile("cpuid"
5525 : "+a" (level), "=d" (cpu.flags[0])
5526 : : "ecx", "ebx");
5527- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5528+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5529
5530 err = check_flags();
5531 }
5532diff -urNp linux-3.0.4/arch/x86/boot/header.S linux-3.0.4/arch/x86/boot/header.S
5533--- linux-3.0.4/arch/x86/boot/header.S 2011-07-21 22:17:23.000000000 -0400
5534+++ linux-3.0.4/arch/x86/boot/header.S 2011-08-23 21:47:55.000000000 -0400
5535@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
5536 # single linked list of
5537 # struct setup_data
5538
5539-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
5540+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
5541
5542 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
5543 #define VO_INIT_SIZE (VO__end - VO__text)
5544diff -urNp linux-3.0.4/arch/x86/boot/Makefile linux-3.0.4/arch/x86/boot/Makefile
5545--- linux-3.0.4/arch/x86/boot/Makefile 2011-07-21 22:17:23.000000000 -0400
5546+++ linux-3.0.4/arch/x86/boot/Makefile 2011-08-23 21:47:55.000000000 -0400
5547@@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
5548 $(call cc-option, -fno-stack-protector) \
5549 $(call cc-option, -mpreferred-stack-boundary=2)
5550 KBUILD_CFLAGS += $(call cc-option, -m32)
5551+ifdef CONSTIFY_PLUGIN
5552+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5553+endif
5554 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5555 GCOV_PROFILE := n
5556
5557diff -urNp linux-3.0.4/arch/x86/boot/memory.c linux-3.0.4/arch/x86/boot/memory.c
5558--- linux-3.0.4/arch/x86/boot/memory.c 2011-07-21 22:17:23.000000000 -0400
5559+++ linux-3.0.4/arch/x86/boot/memory.c 2011-08-23 21:47:55.000000000 -0400
5560@@ -19,7 +19,7 @@
5561
5562 static int detect_memory_e820(void)
5563 {
5564- int count = 0;
5565+ unsigned int count = 0;
5566 struct biosregs ireg, oreg;
5567 struct e820entry *desc = boot_params.e820_map;
5568 static struct e820entry buf; /* static so it is zeroed */
5569diff -urNp linux-3.0.4/arch/x86/boot/video.c linux-3.0.4/arch/x86/boot/video.c
5570--- linux-3.0.4/arch/x86/boot/video.c 2011-07-21 22:17:23.000000000 -0400
5571+++ linux-3.0.4/arch/x86/boot/video.c 2011-08-23 21:47:55.000000000 -0400
5572@@ -96,7 +96,7 @@ static void store_mode_params(void)
5573 static unsigned int get_entry(void)
5574 {
5575 char entry_buf[4];
5576- int i, len = 0;
5577+ unsigned int i, len = 0;
5578 int key;
5579 unsigned int v;
5580
5581diff -urNp linux-3.0.4/arch/x86/boot/video-vesa.c linux-3.0.4/arch/x86/boot/video-vesa.c
5582--- linux-3.0.4/arch/x86/boot/video-vesa.c 2011-07-21 22:17:23.000000000 -0400
5583+++ linux-3.0.4/arch/x86/boot/video-vesa.c 2011-08-23 21:47:55.000000000 -0400
5584@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
5585
5586 boot_params.screen_info.vesapm_seg = oreg.es;
5587 boot_params.screen_info.vesapm_off = oreg.di;
5588+ boot_params.screen_info.vesapm_size = oreg.cx;
5589 }
5590
5591 /*
5592diff -urNp linux-3.0.4/arch/x86/ia32/ia32_aout.c linux-3.0.4/arch/x86/ia32/ia32_aout.c
5593--- linux-3.0.4/arch/x86/ia32/ia32_aout.c 2011-07-21 22:17:23.000000000 -0400
5594+++ linux-3.0.4/arch/x86/ia32/ia32_aout.c 2011-08-23 21:48:14.000000000 -0400
5595@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, st
5596 unsigned long dump_start, dump_size;
5597 struct user32 dump;
5598
5599+ memset(&dump, 0, sizeof(dump));
5600+
5601 fs = get_fs();
5602 set_fs(KERNEL_DS);
5603 has_dumped = 1;
5604diff -urNp linux-3.0.4/arch/x86/ia32/ia32entry.S linux-3.0.4/arch/x86/ia32/ia32entry.S
5605--- linux-3.0.4/arch/x86/ia32/ia32entry.S 2011-07-21 22:17:23.000000000 -0400
5606+++ linux-3.0.4/arch/x86/ia32/ia32entry.S 2011-08-25 17:36:37.000000000 -0400
5607@@ -13,6 +13,7 @@
5608 #include <asm/thread_info.h>
5609 #include <asm/segment.h>
5610 #include <asm/irqflags.h>
5611+#include <asm/pgtable.h>
5612 #include <linux/linkage.h>
5613
5614 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
5615@@ -95,6 +96,29 @@ ENTRY(native_irq_enable_sysexit)
5616 ENDPROC(native_irq_enable_sysexit)
5617 #endif
5618
5619+ .macro pax_enter_kernel_user
5620+#ifdef CONFIG_PAX_MEMORY_UDEREF
5621+ call pax_enter_kernel_user
5622+#endif
5623+ .endm
5624+
5625+ .macro pax_exit_kernel_user
5626+#ifdef CONFIG_PAX_MEMORY_UDEREF
5627+ call pax_exit_kernel_user
5628+#endif
5629+#ifdef CONFIG_PAX_RANDKSTACK
5630+ pushq %rax
5631+ call pax_randomize_kstack
5632+ popq %rax
5633+#endif
5634+ .endm
5635+
5636+ .macro pax_erase_kstack
5637+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5638+ call pax_erase_kstack
5639+#endif
5640+ .endm
5641+
5642 /*
5643 * 32bit SYSENTER instruction entry.
5644 *
5645@@ -121,7 +145,7 @@ ENTRY(ia32_sysenter_target)
5646 CFI_REGISTER rsp,rbp
5647 SWAPGS_UNSAFE_STACK
5648 movq PER_CPU_VAR(kernel_stack), %rsp
5649- addq $(KERNEL_STACK_OFFSET),%rsp
5650+ pax_enter_kernel_user
5651 /*
5652 * No need to follow this irqs on/off section: the syscall
5653 * disabled irqs, here we enable it straight after entry:
5654@@ -134,7 +158,8 @@ ENTRY(ia32_sysenter_target)
5655 CFI_REL_OFFSET rsp,0
5656 pushfq_cfi
5657 /*CFI_REL_OFFSET rflags,0*/
5658- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
5659+ GET_THREAD_INFO(%r10)
5660+ movl TI_sysenter_return(%r10), %r10d
5661 CFI_REGISTER rip,r10
5662 pushq_cfi $__USER32_CS
5663 /*CFI_REL_OFFSET cs,0*/
5664@@ -146,6 +171,12 @@ ENTRY(ia32_sysenter_target)
5665 SAVE_ARGS 0,0,1
5666 /* no need to do an access_ok check here because rbp has been
5667 32bit zero extended */
5668+
5669+#ifdef CONFIG_PAX_MEMORY_UDEREF
5670+ mov $PAX_USER_SHADOW_BASE,%r10
5671+ add %r10,%rbp
5672+#endif
5673+
5674 1: movl (%rbp),%ebp
5675 .section __ex_table,"a"
5676 .quad 1b,ia32_badarg
5677@@ -168,6 +199,8 @@ sysenter_dispatch:
5678 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5679 jnz sysexit_audit
5680 sysexit_from_sys_call:
5681+ pax_exit_kernel_user
5682+ pax_erase_kstack
5683 andl $~TS_COMPAT,TI_status(%r10)
5684 /* clear IF, that popfq doesn't enable interrupts early */
5685 andl $~0x200,EFLAGS-R11(%rsp)
5686@@ -194,6 +227,9 @@ sysexit_from_sys_call:
5687 movl %eax,%esi /* 2nd arg: syscall number */
5688 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
5689 call audit_syscall_entry
5690+
5691+ pax_erase_kstack
5692+
5693 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
5694 cmpq $(IA32_NR_syscalls-1),%rax
5695 ja ia32_badsys
5696@@ -246,6 +282,9 @@ sysenter_tracesys:
5697 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
5698 movq %rsp,%rdi /* &pt_regs -> arg1 */
5699 call syscall_trace_enter
5700+
5701+ pax_erase_kstack
5702+
5703 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5704 RESTORE_REST
5705 cmpq $(IA32_NR_syscalls-1),%rax
5706@@ -277,19 +316,24 @@ ENDPROC(ia32_sysenter_target)
5707 ENTRY(ia32_cstar_target)
5708 CFI_STARTPROC32 simple
5709 CFI_SIGNAL_FRAME
5710- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
5711+ CFI_DEF_CFA rsp,0
5712 CFI_REGISTER rip,rcx
5713 /*CFI_REGISTER rflags,r11*/
5714 SWAPGS_UNSAFE_STACK
5715 movl %esp,%r8d
5716 CFI_REGISTER rsp,r8
5717 movq PER_CPU_VAR(kernel_stack),%rsp
5718+
5719+#ifdef CONFIG_PAX_MEMORY_UDEREF
5720+ pax_enter_kernel_user
5721+#endif
5722+
5723 /*
5724 * No need to follow this irqs on/off section: the syscall
5725 * disabled irqs and here we enable it straight after entry:
5726 */
5727 ENABLE_INTERRUPTS(CLBR_NONE)
5728- SAVE_ARGS 8,1,1
5729+ SAVE_ARGS 8*6,1,1
5730 movl %eax,%eax /* zero extension */
5731 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
5732 movq %rcx,RIP-ARGOFFSET(%rsp)
5733@@ -305,6 +349,12 @@ ENTRY(ia32_cstar_target)
5734 /* no need to do an access_ok check here because r8 has been
5735 32bit zero extended */
5736 /* hardware stack frame is complete now */
5737+
5738+#ifdef CONFIG_PAX_MEMORY_UDEREF
5739+ mov $PAX_USER_SHADOW_BASE,%r10
5740+ add %r10,%r8
5741+#endif
5742+
5743 1: movl (%r8),%r9d
5744 .section __ex_table,"a"
5745 .quad 1b,ia32_badarg
5746@@ -327,6 +377,8 @@ cstar_dispatch:
5747 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5748 jnz sysretl_audit
5749 sysretl_from_sys_call:
5750+ pax_exit_kernel_user
5751+ pax_erase_kstack
5752 andl $~TS_COMPAT,TI_status(%r10)
5753 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
5754 movl RIP-ARGOFFSET(%rsp),%ecx
5755@@ -364,6 +416,9 @@ cstar_tracesys:
5756 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5757 movq %rsp,%rdi /* &pt_regs -> arg1 */
5758 call syscall_trace_enter
5759+
5760+ pax_erase_kstack
5761+
5762 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
5763 RESTORE_REST
5764 xchgl %ebp,%r9d
5765@@ -409,6 +464,7 @@ ENTRY(ia32_syscall)
5766 CFI_REL_OFFSET rip,RIP-RIP
5767 PARAVIRT_ADJUST_EXCEPTION_FRAME
5768 SWAPGS
5769+ pax_enter_kernel_user
5770 /*
5771 * No need to follow this irqs on/off section: the syscall
5772 * disabled irqs and here we enable it straight after entry:
5773@@ -441,6 +497,9 @@ ia32_tracesys:
5774 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5775 movq %rsp,%rdi /* &pt_regs -> arg1 */
5776 call syscall_trace_enter
5777+
5778+ pax_erase_kstack
5779+
5780 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5781 RESTORE_REST
5782 cmpq $(IA32_NR_syscalls-1),%rax
5783diff -urNp linux-3.0.4/arch/x86/ia32/ia32_signal.c linux-3.0.4/arch/x86/ia32/ia32_signal.c
5784--- linux-3.0.4/arch/x86/ia32/ia32_signal.c 2011-07-21 22:17:23.000000000 -0400
5785+++ linux-3.0.4/arch/x86/ia32/ia32_signal.c 2011-08-23 21:47:55.000000000 -0400
5786@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
5787 sp -= frame_size;
5788 /* Align the stack pointer according to the i386 ABI,
5789 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
5790- sp = ((sp + 4) & -16ul) - 4;
5791+ sp = ((sp - 12) & -16ul) - 4;
5792 return (void __user *) sp;
5793 }
5794
5795@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
5796 * These are actually not used anymore, but left because some
5797 * gdb versions depend on them as a marker.
5798 */
5799- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5800+ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5801 } put_user_catch(err);
5802
5803 if (err)
5804@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
5805 0xb8,
5806 __NR_ia32_rt_sigreturn,
5807 0x80cd,
5808- 0,
5809+ 0
5810 };
5811
5812 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
5813@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
5814
5815 if (ka->sa.sa_flags & SA_RESTORER)
5816 restorer = ka->sa.sa_restorer;
5817+ else if (current->mm->context.vdso)
5818+ /* Return stub is in 32bit vsyscall page */
5819+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
5820 else
5821- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
5822- rt_sigreturn);
5823+ restorer = &frame->retcode;
5824 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
5825
5826 /*
5827 * Not actually used anymore, but left because some gdb
5828 * versions need it.
5829 */
5830- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5831+ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5832 } put_user_catch(err);
5833
5834 if (err)
5835diff -urNp linux-3.0.4/arch/x86/include/asm/alternative.h linux-3.0.4/arch/x86/include/asm/alternative.h
5836--- linux-3.0.4/arch/x86/include/asm/alternative.h 2011-07-21 22:17:23.000000000 -0400
5837+++ linux-3.0.4/arch/x86/include/asm/alternative.h 2011-08-23 21:47:55.000000000 -0400
5838@@ -93,7 +93,7 @@ static inline int alternatives_text_rese
5839 ".section .discard,\"aw\",@progbits\n" \
5840 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
5841 ".previous\n" \
5842- ".section .altinstr_replacement, \"ax\"\n" \
5843+ ".section .altinstr_replacement, \"a\"\n" \
5844 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
5845 ".previous"
5846
5847diff -urNp linux-3.0.4/arch/x86/include/asm/apic.h linux-3.0.4/arch/x86/include/asm/apic.h
5848--- linux-3.0.4/arch/x86/include/asm/apic.h 2011-07-21 22:17:23.000000000 -0400
5849+++ linux-3.0.4/arch/x86/include/asm/apic.h 2011-08-23 21:48:14.000000000 -0400
5850@@ -45,7 +45,7 @@ static inline void generic_apic_probe(vo
5851
5852 #ifdef CONFIG_X86_LOCAL_APIC
5853
5854-extern unsigned int apic_verbosity;
5855+extern int apic_verbosity;
5856 extern int local_apic_timer_c2_ok;
5857
5858 extern int disable_apic;
5859diff -urNp linux-3.0.4/arch/x86/include/asm/apm.h linux-3.0.4/arch/x86/include/asm/apm.h
5860--- linux-3.0.4/arch/x86/include/asm/apm.h 2011-07-21 22:17:23.000000000 -0400
5861+++ linux-3.0.4/arch/x86/include/asm/apm.h 2011-08-23 21:47:55.000000000 -0400
5862@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
5863 __asm__ __volatile__(APM_DO_ZERO_SEGS
5864 "pushl %%edi\n\t"
5865 "pushl %%ebp\n\t"
5866- "lcall *%%cs:apm_bios_entry\n\t"
5867+ "lcall *%%ss:apm_bios_entry\n\t"
5868 "setc %%al\n\t"
5869 "popl %%ebp\n\t"
5870 "popl %%edi\n\t"
5871@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
5872 __asm__ __volatile__(APM_DO_ZERO_SEGS
5873 "pushl %%edi\n\t"
5874 "pushl %%ebp\n\t"
5875- "lcall *%%cs:apm_bios_entry\n\t"
5876+ "lcall *%%ss:apm_bios_entry\n\t"
5877 "setc %%bl\n\t"
5878 "popl %%ebp\n\t"
5879 "popl %%edi\n\t"
5880diff -urNp linux-3.0.4/arch/x86/include/asm/atomic64_32.h linux-3.0.4/arch/x86/include/asm/atomic64_32.h
5881--- linux-3.0.4/arch/x86/include/asm/atomic64_32.h 2011-07-21 22:17:23.000000000 -0400
5882+++ linux-3.0.4/arch/x86/include/asm/atomic64_32.h 2011-08-23 21:47:55.000000000 -0400
5883@@ -12,6 +12,14 @@ typedef struct {
5884 u64 __aligned(8) counter;
5885 } atomic64_t;
5886
5887+#ifdef CONFIG_PAX_REFCOUNT
5888+typedef struct {
5889+ u64 __aligned(8) counter;
5890+} atomic64_unchecked_t;
5891+#else
5892+typedef atomic64_t atomic64_unchecked_t;
5893+#endif
5894+
5895 #define ATOMIC64_INIT(val) { (val) }
5896
5897 #ifdef CONFIG_X86_CMPXCHG64
5898@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg
5899 }
5900
5901 /**
5902+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
5903+ * @p: pointer to type atomic64_unchecked_t
5904+ * @o: expected value
5905+ * @n: new value
5906+ *
5907+ * Atomically sets @v to @n if it was equal to @o and returns
5908+ * the old value.
5909+ */
5910+
5911+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
5912+{
5913+ return cmpxchg64(&v->counter, o, n);
5914+}
5915+
5916+/**
5917 * atomic64_xchg - xchg atomic64 variable
5918 * @v: pointer to type atomic64_t
5919 * @n: value to assign
5920@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64
5921 }
5922
5923 /**
5924+ * atomic64_set_unchecked - set atomic64 variable
5925+ * @v: pointer to type atomic64_unchecked_t
5926+ * @n: value to assign
5927+ *
5928+ * Atomically sets the value of @v to @n.
5929+ */
5930+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
5931+{
5932+ unsigned high = (unsigned)(i >> 32);
5933+ unsigned low = (unsigned)i;
5934+ asm volatile(ATOMIC64_ALTERNATIVE(set)
5935+ : "+b" (low), "+c" (high)
5936+ : "S" (v)
5937+ : "eax", "edx", "memory"
5938+ );
5939+}
5940+
5941+/**
5942 * atomic64_read - read atomic64 variable
5943 * @v: pointer to type atomic64_t
5944 *
5945@@ -93,6 +134,22 @@ static inline long long atomic64_read(at
5946 }
5947
5948 /**
5949+ * atomic64_read_unchecked - read atomic64 variable
5950+ * @v: pointer to type atomic64_unchecked_t
5951+ *
5952+ * Atomically reads the value of @v and returns it.
5953+ */
5954+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
5955+{
5956+ long long r;
5957+ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
5958+ : "=A" (r), "+c" (v)
5959+ : : "memory"
5960+ );
5961+ return r;
5962+ }
5963+
5964+/**
5965 * atomic64_add_return - add and return
5966 * @i: integer value to add
5967 * @v: pointer to type atomic64_t
5968@@ -108,6 +165,22 @@ static inline long long atomic64_add_ret
5969 return i;
5970 }
5971
5972+/**
5973+ * atomic64_add_return_unchecked - add and return
5974+ * @i: integer value to add
5975+ * @v: pointer to type atomic64_unchecked_t
5976+ *
5977+ * Atomically adds @i to @v and returns @i + *@v
5978+ */
5979+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
5980+{
5981+ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
5982+ : "+A" (i), "+c" (v)
5983+ : : "memory"
5984+ );
5985+ return i;
5986+}
5987+
5988 /*
5989 * Other variants with different arithmetic operators:
5990 */
5991@@ -131,6 +204,17 @@ static inline long long atomic64_inc_ret
5992 return a;
5993 }
5994
5995+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
5996+{
5997+ long long a;
5998+ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
5999+ : "=A" (a)
6000+ : "S" (v)
6001+ : "memory", "ecx"
6002+ );
6003+ return a;
6004+}
6005+
6006 static inline long long atomic64_dec_return(atomic64_t *v)
6007 {
6008 long long a;
6009@@ -159,6 +243,22 @@ static inline long long atomic64_add(lon
6010 }
6011
6012 /**
6013+ * atomic64_add_unchecked - add integer to atomic64 variable
6014+ * @i: integer value to add
6015+ * @v: pointer to type atomic64_unchecked_t
6016+ *
6017+ * Atomically adds @i to @v.
6018+ */
6019+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
6020+{
6021+ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
6022+ : "+A" (i), "+c" (v)
6023+ : : "memory"
6024+ );
6025+ return i;
6026+}
6027+
6028+/**
6029 * atomic64_sub - subtract the atomic64 variable
6030 * @i: integer value to subtract
6031 * @v: pointer to type atomic64_t
6032diff -urNp linux-3.0.4/arch/x86/include/asm/atomic64_64.h linux-3.0.4/arch/x86/include/asm/atomic64_64.h
6033--- linux-3.0.4/arch/x86/include/asm/atomic64_64.h 2011-07-21 22:17:23.000000000 -0400
6034+++ linux-3.0.4/arch/x86/include/asm/atomic64_64.h 2011-08-23 21:47:55.000000000 -0400
6035@@ -18,7 +18,19 @@
6036 */
6037 static inline long atomic64_read(const atomic64_t *v)
6038 {
6039- return (*(volatile long *)&(v)->counter);
6040+ return (*(volatile const long *)&(v)->counter);
6041+}
6042+
6043+/**
6044+ * atomic64_read_unchecked - read atomic64 variable
6045+ * @v: pointer of type atomic64_unchecked_t
6046+ *
6047+ * Atomically reads the value of @v.
6048+ * Doesn't imply a read memory barrier.
6049+ */
6050+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6051+{
6052+ return (*(volatile const long *)&(v)->counter);
6053 }
6054
6055 /**
6056@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
6057 }
6058
6059 /**
6060+ * atomic64_set_unchecked - set atomic64 variable
6061+ * @v: pointer to type atomic64_unchecked_t
6062+ * @i: required value
6063+ *
6064+ * Atomically sets the value of @v to @i.
6065+ */
6066+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6067+{
6068+ v->counter = i;
6069+}
6070+
6071+/**
6072 * atomic64_add - add integer to atomic64 variable
6073 * @i: integer value to add
6074 * @v: pointer to type atomic64_t
6075@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
6076 */
6077 static inline void atomic64_add(long i, atomic64_t *v)
6078 {
6079+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
6080+
6081+#ifdef CONFIG_PAX_REFCOUNT
6082+ "jno 0f\n"
6083+ LOCK_PREFIX "subq %1,%0\n"
6084+ "int $4\n0:\n"
6085+ _ASM_EXTABLE(0b, 0b)
6086+#endif
6087+
6088+ : "=m" (v->counter)
6089+ : "er" (i), "m" (v->counter));
6090+}
6091+
6092+/**
6093+ * atomic64_add_unchecked - add integer to atomic64 variable
6094+ * @i: integer value to add
6095+ * @v: pointer to type atomic64_unchecked_t
6096+ *
6097+ * Atomically adds @i to @v.
6098+ */
6099+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6100+{
6101 asm volatile(LOCK_PREFIX "addq %1,%0"
6102 : "=m" (v->counter)
6103 : "er" (i), "m" (v->counter));
6104@@ -56,7 +102,29 @@ static inline void atomic64_add(long i,
6105 */
6106 static inline void atomic64_sub(long i, atomic64_t *v)
6107 {
6108- asm volatile(LOCK_PREFIX "subq %1,%0"
6109+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
6110+
6111+#ifdef CONFIG_PAX_REFCOUNT
6112+ "jno 0f\n"
6113+ LOCK_PREFIX "addq %1,%0\n"
6114+ "int $4\n0:\n"
6115+ _ASM_EXTABLE(0b, 0b)
6116+#endif
6117+
6118+ : "=m" (v->counter)
6119+ : "er" (i), "m" (v->counter));
6120+}
6121+
6122+/**
6123+ * atomic64_sub_unchecked - subtract the atomic64 variable
6124+ * @i: integer value to subtract
6125+ * @v: pointer to type atomic64_unchecked_t
6126+ *
6127+ * Atomically subtracts @i from @v.
6128+ */
6129+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6130+{
6131+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
6132 : "=m" (v->counter)
6133 : "er" (i), "m" (v->counter));
6134 }
6135@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(
6136 {
6137 unsigned char c;
6138
6139- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
6140+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
6141+
6142+#ifdef CONFIG_PAX_REFCOUNT
6143+ "jno 0f\n"
6144+ LOCK_PREFIX "addq %2,%0\n"
6145+ "int $4\n0:\n"
6146+ _ASM_EXTABLE(0b, 0b)
6147+#endif
6148+
6149+ "sete %1\n"
6150 : "=m" (v->counter), "=qm" (c)
6151 : "er" (i), "m" (v->counter) : "memory");
6152 return c;
6153@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(
6154 */
6155 static inline void atomic64_inc(atomic64_t *v)
6156 {
6157+ asm volatile(LOCK_PREFIX "incq %0\n"
6158+
6159+#ifdef CONFIG_PAX_REFCOUNT
6160+ "jno 0f\n"
6161+ LOCK_PREFIX "decq %0\n"
6162+ "int $4\n0:\n"
6163+ _ASM_EXTABLE(0b, 0b)
6164+#endif
6165+
6166+ : "=m" (v->counter)
6167+ : "m" (v->counter));
6168+}
6169+
6170+/**
6171+ * atomic64_inc_unchecked - increment atomic64 variable
6172+ * @v: pointer to type atomic64_unchecked_t
6173+ *
6174+ * Atomically increments @v by 1.
6175+ */
6176+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6177+{
6178 asm volatile(LOCK_PREFIX "incq %0"
6179 : "=m" (v->counter)
6180 : "m" (v->counter));
6181@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64
6182 */
6183 static inline void atomic64_dec(atomic64_t *v)
6184 {
6185- asm volatile(LOCK_PREFIX "decq %0"
6186+ asm volatile(LOCK_PREFIX "decq %0\n"
6187+
6188+#ifdef CONFIG_PAX_REFCOUNT
6189+ "jno 0f\n"
6190+ LOCK_PREFIX "incq %0\n"
6191+ "int $4\n0:\n"
6192+ _ASM_EXTABLE(0b, 0b)
6193+#endif
6194+
6195+ : "=m" (v->counter)
6196+ : "m" (v->counter));
6197+}
6198+
6199+/**
6200+ * atomic64_dec_unchecked - decrement atomic64 variable
6201+ * @v: pointer to type atomic64_t
6202+ *
6203+ * Atomically decrements @v by 1.
6204+ */
6205+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6206+{
6207+ asm volatile(LOCK_PREFIX "decq %0\n"
6208 : "=m" (v->counter)
6209 : "m" (v->counter));
6210 }
6211@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(
6212 {
6213 unsigned char c;
6214
6215- asm volatile(LOCK_PREFIX "decq %0; sete %1"
6216+ asm volatile(LOCK_PREFIX "decq %0\n"
6217+
6218+#ifdef CONFIG_PAX_REFCOUNT
6219+ "jno 0f\n"
6220+ LOCK_PREFIX "incq %0\n"
6221+ "int $4\n0:\n"
6222+ _ASM_EXTABLE(0b, 0b)
6223+#endif
6224+
6225+ "sete %1\n"
6226 : "=m" (v->counter), "=qm" (c)
6227 : "m" (v->counter) : "memory");
6228 return c != 0;
6229@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(
6230 {
6231 unsigned char c;
6232
6233- asm volatile(LOCK_PREFIX "incq %0; sete %1"
6234+ asm volatile(LOCK_PREFIX "incq %0\n"
6235+
6236+#ifdef CONFIG_PAX_REFCOUNT
6237+ "jno 0f\n"
6238+ LOCK_PREFIX "decq %0\n"
6239+ "int $4\n0:\n"
6240+ _ASM_EXTABLE(0b, 0b)
6241+#endif
6242+
6243+ "sete %1\n"
6244 : "=m" (v->counter), "=qm" (c)
6245 : "m" (v->counter) : "memory");
6246 return c != 0;
6247@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(
6248 {
6249 unsigned char c;
6250
6251- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
6252+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
6253+
6254+#ifdef CONFIG_PAX_REFCOUNT
6255+ "jno 0f\n"
6256+ LOCK_PREFIX "subq %2,%0\n"
6257+ "int $4\n0:\n"
6258+ _ASM_EXTABLE(0b, 0b)
6259+#endif
6260+
6261+ "sets %1\n"
6262 : "=m" (v->counter), "=qm" (c)
6263 : "er" (i), "m" (v->counter) : "memory");
6264 return c;
6265@@ -171,7 +317,31 @@ static inline int atomic64_add_negative(
6266 static inline long atomic64_add_return(long i, atomic64_t *v)
6267 {
6268 long __i = i;
6269- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
6270+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
6271+
6272+#ifdef CONFIG_PAX_REFCOUNT
6273+ "jno 0f\n"
6274+ "movq %0, %1\n"
6275+ "int $4\n0:\n"
6276+ _ASM_EXTABLE(0b, 0b)
6277+#endif
6278+
6279+ : "+r" (i), "+m" (v->counter)
6280+ : : "memory");
6281+ return i + __i;
6282+}
6283+
6284+/**
6285+ * atomic64_add_return_unchecked - add and return
6286+ * @i: integer value to add
6287+ * @v: pointer to type atomic64_unchecked_t
6288+ *
6289+ * Atomically adds @i to @v and returns @i + @v
6290+ */
6291+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6292+{
6293+ long __i = i;
6294+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
6295 : "+r" (i), "+m" (v->counter)
6296 : : "memory");
6297 return i + __i;
6298@@ -183,6 +353,10 @@ static inline long atomic64_sub_return(l
6299 }
6300
6301 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
6302+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6303+{
6304+ return atomic64_add_return_unchecked(1, v);
6305+}
6306 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
6307
6308 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6309@@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atom
6310 return cmpxchg(&v->counter, old, new);
6311 }
6312
6313+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
6314+{
6315+ return cmpxchg(&v->counter, old, new);
6316+}
6317+
6318 static inline long atomic64_xchg(atomic64_t *v, long new)
6319 {
6320 return xchg(&v->counter, new);
6321@@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic6
6322 */
6323 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
6324 {
6325- long c, old;
6326+ long c, old, new;
6327 c = atomic64_read(v);
6328 for (;;) {
6329- if (unlikely(c == (u)))
6330+ if (unlikely(c == u))
6331 break;
6332- old = atomic64_cmpxchg((v), c, c + (a));
6333+
6334+ asm volatile("add %2,%0\n"
6335+
6336+#ifdef CONFIG_PAX_REFCOUNT
6337+ "jno 0f\n"
6338+ "sub %2,%0\n"
6339+ "int $4\n0:\n"
6340+ _ASM_EXTABLE(0b, 0b)
6341+#endif
6342+
6343+ : "=r" (new)
6344+ : "0" (c), "ir" (a));
6345+
6346+ old = atomic64_cmpxchg(v, c, new);
6347 if (likely(old == c))
6348 break;
6349 c = old;
6350 }
6351- return c != (u);
6352+ return c != u;
6353 }
6354
6355 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6356diff -urNp linux-3.0.4/arch/x86/include/asm/atomic.h linux-3.0.4/arch/x86/include/asm/atomic.h
6357--- linux-3.0.4/arch/x86/include/asm/atomic.h 2011-07-21 22:17:23.000000000 -0400
6358+++ linux-3.0.4/arch/x86/include/asm/atomic.h 2011-08-23 21:47:55.000000000 -0400
6359@@ -22,7 +22,18 @@
6360 */
6361 static inline int atomic_read(const atomic_t *v)
6362 {
6363- return (*(volatile int *)&(v)->counter);
6364+ return (*(volatile const int *)&(v)->counter);
6365+}
6366+
6367+/**
6368+ * atomic_read_unchecked - read atomic variable
6369+ * @v: pointer of type atomic_unchecked_t
6370+ *
6371+ * Atomically reads the value of @v.
6372+ */
6373+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6374+{
6375+ return (*(volatile const int *)&(v)->counter);
6376 }
6377
6378 /**
6379@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
6380 }
6381
6382 /**
6383+ * atomic_set_unchecked - set atomic variable
6384+ * @v: pointer of type atomic_unchecked_t
6385+ * @i: required value
6386+ *
6387+ * Atomically sets the value of @v to @i.
6388+ */
6389+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6390+{
6391+ v->counter = i;
6392+}
6393+
6394+/**
6395 * atomic_add - add integer to atomic variable
6396 * @i: integer value to add
6397 * @v: pointer of type atomic_t
6398@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
6399 */
6400 static inline void atomic_add(int i, atomic_t *v)
6401 {
6402- asm volatile(LOCK_PREFIX "addl %1,%0"
6403+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6404+
6405+#ifdef CONFIG_PAX_REFCOUNT
6406+ "jno 0f\n"
6407+ LOCK_PREFIX "subl %1,%0\n"
6408+ "int $4\n0:\n"
6409+ _ASM_EXTABLE(0b, 0b)
6410+#endif
6411+
6412+ : "+m" (v->counter)
6413+ : "ir" (i));
6414+}
6415+
6416+/**
6417+ * atomic_add_unchecked - add integer to atomic variable
6418+ * @i: integer value to add
6419+ * @v: pointer of type atomic_unchecked_t
6420+ *
6421+ * Atomically adds @i to @v.
6422+ */
6423+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6424+{
6425+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6426 : "+m" (v->counter)
6427 : "ir" (i));
6428 }
6429@@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
6430 */
6431 static inline void atomic_sub(int i, atomic_t *v)
6432 {
6433- asm volatile(LOCK_PREFIX "subl %1,%0"
6434+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6435+
6436+#ifdef CONFIG_PAX_REFCOUNT
6437+ "jno 0f\n"
6438+ LOCK_PREFIX "addl %1,%0\n"
6439+ "int $4\n0:\n"
6440+ _ASM_EXTABLE(0b, 0b)
6441+#endif
6442+
6443+ : "+m" (v->counter)
6444+ : "ir" (i));
6445+}
6446+
6447+/**
6448+ * atomic_sub_unchecked - subtract integer from atomic variable
6449+ * @i: integer value to subtract
6450+ * @v: pointer of type atomic_unchecked_t
6451+ *
6452+ * Atomically subtracts @i from @v.
6453+ */
6454+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6455+{
6456+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6457 : "+m" (v->counter)
6458 : "ir" (i));
6459 }
6460@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
6461 {
6462 unsigned char c;
6463
6464- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6465+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
6466+
6467+#ifdef CONFIG_PAX_REFCOUNT
6468+ "jno 0f\n"
6469+ LOCK_PREFIX "addl %2,%0\n"
6470+ "int $4\n0:\n"
6471+ _ASM_EXTABLE(0b, 0b)
6472+#endif
6473+
6474+ "sete %1\n"
6475 : "+m" (v->counter), "=qm" (c)
6476 : "ir" (i) : "memory");
6477 return c;
6478@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
6479 */
6480 static inline void atomic_inc(atomic_t *v)
6481 {
6482- asm volatile(LOCK_PREFIX "incl %0"
6483+ asm volatile(LOCK_PREFIX "incl %0\n"
6484+
6485+#ifdef CONFIG_PAX_REFCOUNT
6486+ "jno 0f\n"
6487+ LOCK_PREFIX "decl %0\n"
6488+ "int $4\n0:\n"
6489+ _ASM_EXTABLE(0b, 0b)
6490+#endif
6491+
6492+ : "+m" (v->counter));
6493+}
6494+
6495+/**
6496+ * atomic_inc_unchecked - increment atomic variable
6497+ * @v: pointer of type atomic_unchecked_t
6498+ *
6499+ * Atomically increments @v by 1.
6500+ */
6501+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6502+{
6503+ asm volatile(LOCK_PREFIX "incl %0\n"
6504 : "+m" (v->counter));
6505 }
6506
6507@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
6508 */
6509 static inline void atomic_dec(atomic_t *v)
6510 {
6511- asm volatile(LOCK_PREFIX "decl %0"
6512+ asm volatile(LOCK_PREFIX "decl %0\n"
6513+
6514+#ifdef CONFIG_PAX_REFCOUNT
6515+ "jno 0f\n"
6516+ LOCK_PREFIX "incl %0\n"
6517+ "int $4\n0:\n"
6518+ _ASM_EXTABLE(0b, 0b)
6519+#endif
6520+
6521+ : "+m" (v->counter));
6522+}
6523+
6524+/**
6525+ * atomic_dec_unchecked - decrement atomic variable
6526+ * @v: pointer of type atomic_unchecked_t
6527+ *
6528+ * Atomically decrements @v by 1.
6529+ */
6530+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6531+{
6532+ asm volatile(LOCK_PREFIX "decl %0\n"
6533 : "+m" (v->counter));
6534 }
6535
6536@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
6537 {
6538 unsigned char c;
6539
6540- asm volatile(LOCK_PREFIX "decl %0; sete %1"
6541+ asm volatile(LOCK_PREFIX "decl %0\n"
6542+
6543+#ifdef CONFIG_PAX_REFCOUNT
6544+ "jno 0f\n"
6545+ LOCK_PREFIX "incl %0\n"
6546+ "int $4\n0:\n"
6547+ _ASM_EXTABLE(0b, 0b)
6548+#endif
6549+
6550+ "sete %1\n"
6551 : "+m" (v->counter), "=qm" (c)
6552 : : "memory");
6553 return c != 0;
6554@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(at
6555 {
6556 unsigned char c;
6557
6558- asm volatile(LOCK_PREFIX "incl %0; sete %1"
6559+ asm volatile(LOCK_PREFIX "incl %0\n"
6560+
6561+#ifdef CONFIG_PAX_REFCOUNT
6562+ "jno 0f\n"
6563+ LOCK_PREFIX "decl %0\n"
6564+ "int $4\n0:\n"
6565+ _ASM_EXTABLE(0b, 0b)
6566+#endif
6567+
6568+ "sete %1\n"
6569+ : "+m" (v->counter), "=qm" (c)
6570+ : : "memory");
6571+ return c != 0;
6572+}
6573+
6574+/**
6575+ * atomic_inc_and_test_unchecked - increment and test
6576+ * @v: pointer of type atomic_unchecked_t
6577+ *
6578+ * Atomically increments @v by 1
6579+ * and returns true if the result is zero, or false for all
6580+ * other cases.
6581+ */
6582+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6583+{
6584+ unsigned char c;
6585+
6586+ asm volatile(LOCK_PREFIX "incl %0\n"
6587+ "sete %1\n"
6588 : "+m" (v->counter), "=qm" (c)
6589 : : "memory");
6590 return c != 0;
6591@@ -157,7 +310,16 @@ static inline int atomic_add_negative(in
6592 {
6593 unsigned char c;
6594
6595- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6596+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
6597+
6598+#ifdef CONFIG_PAX_REFCOUNT
6599+ "jno 0f\n"
6600+ LOCK_PREFIX "subl %2,%0\n"
6601+ "int $4\n0:\n"
6602+ _ASM_EXTABLE(0b, 0b)
6603+#endif
6604+
6605+ "sets %1\n"
6606 : "+m" (v->counter), "=qm" (c)
6607 : "ir" (i) : "memory");
6608 return c;
6609@@ -180,6 +342,46 @@ static inline int atomic_add_return(int
6610 #endif
6611 /* Modern 486+ processor */
6612 __i = i;
6613+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6614+
6615+#ifdef CONFIG_PAX_REFCOUNT
6616+ "jno 0f\n"
6617+ "movl %0, %1\n"
6618+ "int $4\n0:\n"
6619+ _ASM_EXTABLE(0b, 0b)
6620+#endif
6621+
6622+ : "+r" (i), "+m" (v->counter)
6623+ : : "memory");
6624+ return i + __i;
6625+
6626+#ifdef CONFIG_M386
6627+no_xadd: /* Legacy 386 processor */
6628+ local_irq_save(flags);
6629+ __i = atomic_read(v);
6630+ atomic_set(v, i + __i);
6631+ local_irq_restore(flags);
6632+ return i + __i;
6633+#endif
6634+}
6635+
6636+/**
6637+ * atomic_add_return_unchecked - add integer and return
6638+ * @v: pointer of type atomic_unchecked_t
6639+ * @i: integer value to add
6640+ *
6641+ * Atomically adds @i to @v and returns @i + @v
6642+ */
6643+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6644+{
6645+ int __i;
6646+#ifdef CONFIG_M386
6647+ unsigned long flags;
6648+ if (unlikely(boot_cpu_data.x86 <= 3))
6649+ goto no_xadd;
6650+#endif
6651+ /* Modern 486+ processor */
6652+ __i = i;
6653 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6654 : "+r" (i), "+m" (v->counter)
6655 : : "memory");
6656@@ -208,6 +410,10 @@ static inline int atomic_sub_return(int
6657 }
6658
6659 #define atomic_inc_return(v) (atomic_add_return(1, v))
6660+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6661+{
6662+ return atomic_add_return_unchecked(1, v);
6663+}
6664 #define atomic_dec_return(v) (atomic_sub_return(1, v))
6665
6666 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6667@@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_
6668 return cmpxchg(&v->counter, old, new);
6669 }
6670
6671+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6672+{
6673+ return cmpxchg(&v->counter, old, new);
6674+}
6675+
6676 static inline int atomic_xchg(atomic_t *v, int new)
6677 {
6678 return xchg(&v->counter, new);
6679 }
6680
6681+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6682+{
6683+ return xchg(&v->counter, new);
6684+}
6685+
6686 /**
6687 * atomic_add_unless - add unless the number is already a given value
6688 * @v: pointer of type atomic_t
6689@@ -231,21 +447,77 @@ static inline int atomic_xchg(atomic_t *
6690 */
6691 static inline int atomic_add_unless(atomic_t *v, int a, int u)
6692 {
6693- int c, old;
6694+ int c, old, new;
6695 c = atomic_read(v);
6696 for (;;) {
6697- if (unlikely(c == (u)))
6698+ if (unlikely(c == u))
6699 break;
6700- old = atomic_cmpxchg((v), c, c + (a));
6701+
6702+ asm volatile("addl %2,%0\n"
6703+
6704+#ifdef CONFIG_PAX_REFCOUNT
6705+ "jno 0f\n"
6706+ "subl %2,%0\n"
6707+ "int $4\n0:\n"
6708+ _ASM_EXTABLE(0b, 0b)
6709+#endif
6710+
6711+ : "=r" (new)
6712+ : "0" (c), "ir" (a));
6713+
6714+ old = atomic_cmpxchg(v, c, new);
6715 if (likely(old == c))
6716 break;
6717 c = old;
6718 }
6719- return c != (u);
6720+ return c != u;
6721 }
6722
6723 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
6724
6725+/**
6726+ * atomic_inc_not_zero_hint - increment if not null
6727+ * @v: pointer of type atomic_t
6728+ * @hint: probable value of the atomic before the increment
6729+ *
6730+ * This version of atomic_inc_not_zero() gives a hint of probable
6731+ * value of the atomic. This helps processor to not read the memory
6732+ * before doing the atomic read/modify/write cycle, lowering
6733+ * number of bus transactions on some arches.
6734+ *
6735+ * Returns: 0 if increment was not done, 1 otherwise.
6736+ */
6737+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
6738+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
6739+{
6740+ int val, c = hint, new;
6741+
6742+ /* sanity test, should be removed by compiler if hint is a constant */
6743+ if (!hint)
6744+ return atomic_inc_not_zero(v);
6745+
6746+ do {
6747+ asm volatile("incl %0\n"
6748+
6749+#ifdef CONFIG_PAX_REFCOUNT
6750+ "jno 0f\n"
6751+ "decl %0\n"
6752+ "int $4\n0:\n"
6753+ _ASM_EXTABLE(0b, 0b)
6754+#endif
6755+
6756+ : "=r" (new)
6757+ : "0" (c));
6758+
6759+ val = atomic_cmpxchg(v, c, new);
6760+ if (val == c)
6761+ return 1;
6762+ c = val;
6763+ } while (c);
6764+
6765+ return 0;
6766+}
6767+
6768 /*
6769 * atomic_dec_if_positive - decrement by 1 if old value positive
6770 * @v: pointer of type atomic_t
6771diff -urNp linux-3.0.4/arch/x86/include/asm/bitops.h linux-3.0.4/arch/x86/include/asm/bitops.h
6772--- linux-3.0.4/arch/x86/include/asm/bitops.h 2011-07-21 22:17:23.000000000 -0400
6773+++ linux-3.0.4/arch/x86/include/asm/bitops.h 2011-08-23 21:47:55.000000000 -0400
6774@@ -38,7 +38,7 @@
6775 * a mask operation on a byte.
6776 */
6777 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
6778-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
6779+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
6780 #define CONST_MASK(nr) (1 << ((nr) & 7))
6781
6782 /**
6783diff -urNp linux-3.0.4/arch/x86/include/asm/boot.h linux-3.0.4/arch/x86/include/asm/boot.h
6784--- linux-3.0.4/arch/x86/include/asm/boot.h 2011-07-21 22:17:23.000000000 -0400
6785+++ linux-3.0.4/arch/x86/include/asm/boot.h 2011-08-23 21:47:55.000000000 -0400
6786@@ -11,10 +11,15 @@
6787 #include <asm/pgtable_types.h>
6788
6789 /* Physical address where kernel should be loaded. */
6790-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6791+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6792 + (CONFIG_PHYSICAL_ALIGN - 1)) \
6793 & ~(CONFIG_PHYSICAL_ALIGN - 1))
6794
6795+#ifndef __ASSEMBLY__
6796+extern unsigned char __LOAD_PHYSICAL_ADDR[];
6797+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
6798+#endif
6799+
6800 /* Minimum kernel alignment, as a power of two */
6801 #ifdef CONFIG_X86_64
6802 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
6803diff -urNp linux-3.0.4/arch/x86/include/asm/cacheflush.h linux-3.0.4/arch/x86/include/asm/cacheflush.h
6804--- linux-3.0.4/arch/x86/include/asm/cacheflush.h 2011-07-21 22:17:23.000000000 -0400
6805+++ linux-3.0.4/arch/x86/include/asm/cacheflush.h 2011-08-23 21:47:55.000000000 -0400
6806@@ -26,7 +26,7 @@ static inline unsigned long get_page_mem
6807 unsigned long pg_flags = pg->flags & _PGMT_MASK;
6808
6809 if (pg_flags == _PGMT_DEFAULT)
6810- return -1;
6811+ return ~0UL;
6812 else if (pg_flags == _PGMT_WC)
6813 return _PAGE_CACHE_WC;
6814 else if (pg_flags == _PGMT_UC_MINUS)
6815diff -urNp linux-3.0.4/arch/x86/include/asm/cache.h linux-3.0.4/arch/x86/include/asm/cache.h
6816--- linux-3.0.4/arch/x86/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
6817+++ linux-3.0.4/arch/x86/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
6818@@ -5,12 +5,13 @@
6819
6820 /* L1 cache line size */
6821 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
6822-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6823+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6824
6825 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
6826+#define __read_only __attribute__((__section__(".data..read_only")))
6827
6828 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
6829-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
6830+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
6831
6832 #ifdef CONFIG_X86_VSMP
6833 #ifdef CONFIG_SMP
6834diff -urNp linux-3.0.4/arch/x86/include/asm/checksum_32.h linux-3.0.4/arch/x86/include/asm/checksum_32.h
6835--- linux-3.0.4/arch/x86/include/asm/checksum_32.h 2011-07-21 22:17:23.000000000 -0400
6836+++ linux-3.0.4/arch/x86/include/asm/checksum_32.h 2011-08-23 21:47:55.000000000 -0400
6837@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
6838 int len, __wsum sum,
6839 int *src_err_ptr, int *dst_err_ptr);
6840
6841+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
6842+ int len, __wsum sum,
6843+ int *src_err_ptr, int *dst_err_ptr);
6844+
6845+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
6846+ int len, __wsum sum,
6847+ int *src_err_ptr, int *dst_err_ptr);
6848+
6849 /*
6850 * Note: when you get a NULL pointer exception here this means someone
6851 * passed in an incorrect kernel address to one of these functions.
6852@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
6853 int *err_ptr)
6854 {
6855 might_sleep();
6856- return csum_partial_copy_generic((__force void *)src, dst,
6857+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
6858 len, sum, err_ptr, NULL);
6859 }
6860
6861@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
6862 {
6863 might_sleep();
6864 if (access_ok(VERIFY_WRITE, dst, len))
6865- return csum_partial_copy_generic(src, (__force void *)dst,
6866+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
6867 len, sum, NULL, err_ptr);
6868
6869 if (len)
6870diff -urNp linux-3.0.4/arch/x86/include/asm/cpufeature.h linux-3.0.4/arch/x86/include/asm/cpufeature.h
6871--- linux-3.0.4/arch/x86/include/asm/cpufeature.h 2011-07-21 22:17:23.000000000 -0400
6872+++ linux-3.0.4/arch/x86/include/asm/cpufeature.h 2011-08-23 21:47:55.000000000 -0400
6873@@ -358,7 +358,7 @@ static __always_inline __pure bool __sta
6874 ".section .discard,\"aw\",@progbits\n"
6875 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
6876 ".previous\n"
6877- ".section .altinstr_replacement,\"ax\"\n"
6878+ ".section .altinstr_replacement,\"a\"\n"
6879 "3: movb $1,%0\n"
6880 "4:\n"
6881 ".previous\n"
6882diff -urNp linux-3.0.4/arch/x86/include/asm/desc_defs.h linux-3.0.4/arch/x86/include/asm/desc_defs.h
6883--- linux-3.0.4/arch/x86/include/asm/desc_defs.h 2011-07-21 22:17:23.000000000 -0400
6884+++ linux-3.0.4/arch/x86/include/asm/desc_defs.h 2011-08-23 21:47:55.000000000 -0400
6885@@ -31,6 +31,12 @@ struct desc_struct {
6886 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
6887 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
6888 };
6889+ struct {
6890+ u16 offset_low;
6891+ u16 seg;
6892+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
6893+ unsigned offset_high: 16;
6894+ } gate;
6895 };
6896 } __attribute__((packed));
6897
6898diff -urNp linux-3.0.4/arch/x86/include/asm/desc.h linux-3.0.4/arch/x86/include/asm/desc.h
6899--- linux-3.0.4/arch/x86/include/asm/desc.h 2011-07-21 22:17:23.000000000 -0400
6900+++ linux-3.0.4/arch/x86/include/asm/desc.h 2011-08-23 21:47:55.000000000 -0400
6901@@ -4,6 +4,7 @@
6902 #include <asm/desc_defs.h>
6903 #include <asm/ldt.h>
6904 #include <asm/mmu.h>
6905+#include <asm/pgtable.h>
6906
6907 #include <linux/smp.h>
6908
6909@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_
6910
6911 desc->type = (info->read_exec_only ^ 1) << 1;
6912 desc->type |= info->contents << 2;
6913+ desc->type |= info->seg_not_present ^ 1;
6914
6915 desc->s = 1;
6916 desc->dpl = 0x3;
6917@@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_
6918 }
6919
6920 extern struct desc_ptr idt_descr;
6921-extern gate_desc idt_table[];
6922-
6923-struct gdt_page {
6924- struct desc_struct gdt[GDT_ENTRIES];
6925-} __attribute__((aligned(PAGE_SIZE)));
6926-
6927-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
6928+extern gate_desc idt_table[256];
6929
6930+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
6931 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
6932 {
6933- return per_cpu(gdt_page, cpu).gdt;
6934+ return cpu_gdt_table[cpu];
6935 }
6936
6937 #ifdef CONFIG_X86_64
6938@@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *
6939 unsigned long base, unsigned dpl, unsigned flags,
6940 unsigned short seg)
6941 {
6942- gate->a = (seg << 16) | (base & 0xffff);
6943- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
6944+ gate->gate.offset_low = base;
6945+ gate->gate.seg = seg;
6946+ gate->gate.reserved = 0;
6947+ gate->gate.type = type;
6948+ gate->gate.s = 0;
6949+ gate->gate.dpl = dpl;
6950+ gate->gate.p = 1;
6951+ gate->gate.offset_high = base >> 16;
6952 }
6953
6954 #endif
6955@@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(str
6956
6957 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
6958 {
6959+ pax_open_kernel();
6960 memcpy(&idt[entry], gate, sizeof(*gate));
6961+ pax_close_kernel();
6962 }
6963
6964 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
6965 {
6966+ pax_open_kernel();
6967 memcpy(&ldt[entry], desc, 8);
6968+ pax_close_kernel();
6969 }
6970
6971 static inline void
6972@@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struc
6973 default: size = sizeof(*gdt); break;
6974 }
6975
6976+ pax_open_kernel();
6977 memcpy(&gdt[entry], desc, size);
6978+ pax_close_kernel();
6979 }
6980
6981 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
6982@@ -207,7 +216,9 @@ static inline void native_set_ldt(const
6983
6984 static inline void native_load_tr_desc(void)
6985 {
6986+ pax_open_kernel();
6987 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
6988+ pax_close_kernel();
6989 }
6990
6991 static inline void native_load_gdt(const struct desc_ptr *dtr)
6992@@ -244,8 +255,10 @@ static inline void native_load_tls(struc
6993 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
6994 unsigned int i;
6995
6996+ pax_open_kernel();
6997 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
6998 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
6999+ pax_close_kernel();
7000 }
7001
7002 #define _LDT_empty(info) \
7003@@ -307,7 +320,7 @@ static inline void set_desc_limit(struct
7004 desc->limit = (limit >> 16) & 0xf;
7005 }
7006
7007-static inline void _set_gate(int gate, unsigned type, void *addr,
7008+static inline void _set_gate(int gate, unsigned type, const void *addr,
7009 unsigned dpl, unsigned ist, unsigned seg)
7010 {
7011 gate_desc s;
7012@@ -326,7 +339,7 @@ static inline void _set_gate(int gate, u
7013 * Pentium F0 0F bugfix can have resulted in the mapped
7014 * IDT being write-protected.
7015 */
7016-static inline void set_intr_gate(unsigned int n, void *addr)
7017+static inline void set_intr_gate(unsigned int n, const void *addr)
7018 {
7019 BUG_ON((unsigned)n > 0xFF);
7020 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7021@@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsig
7022 /*
7023 * This routine sets up an interrupt gate at directory privilege level 3.
7024 */
7025-static inline void set_system_intr_gate(unsigned int n, void *addr)
7026+static inline void set_system_intr_gate(unsigned int n, const void *addr)
7027 {
7028 BUG_ON((unsigned)n > 0xFF);
7029 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7030 }
7031
7032-static inline void set_system_trap_gate(unsigned int n, void *addr)
7033+static inline void set_system_trap_gate(unsigned int n, const void *addr)
7034 {
7035 BUG_ON((unsigned)n > 0xFF);
7036 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
7037 }
7038
7039-static inline void set_trap_gate(unsigned int n, void *addr)
7040+static inline void set_trap_gate(unsigned int n, const void *addr)
7041 {
7042 BUG_ON((unsigned)n > 0xFF);
7043 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
7044@@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigne
7045 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
7046 {
7047 BUG_ON((unsigned)n > 0xFF);
7048- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
7049+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
7050 }
7051
7052-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
7053+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
7054 {
7055 BUG_ON((unsigned)n > 0xFF);
7056 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
7057 }
7058
7059-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
7060+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
7061 {
7062 BUG_ON((unsigned)n > 0xFF);
7063 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
7064 }
7065
7066+#ifdef CONFIG_X86_32
7067+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
7068+{
7069+ struct desc_struct d;
7070+
7071+ if (likely(limit))
7072+ limit = (limit - 1UL) >> PAGE_SHIFT;
7073+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
7074+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
7075+}
7076+#endif
7077+
7078 #endif /* _ASM_X86_DESC_H */
7079diff -urNp linux-3.0.4/arch/x86/include/asm/e820.h linux-3.0.4/arch/x86/include/asm/e820.h
7080--- linux-3.0.4/arch/x86/include/asm/e820.h 2011-07-21 22:17:23.000000000 -0400
7081+++ linux-3.0.4/arch/x86/include/asm/e820.h 2011-08-23 21:47:55.000000000 -0400
7082@@ -69,7 +69,7 @@ struct e820map {
7083 #define ISA_START_ADDRESS 0xa0000
7084 #define ISA_END_ADDRESS 0x100000
7085
7086-#define BIOS_BEGIN 0x000a0000
7087+#define BIOS_BEGIN 0x000c0000
7088 #define BIOS_END 0x00100000
7089
7090 #define BIOS_ROM_BASE 0xffe00000
7091diff -urNp linux-3.0.4/arch/x86/include/asm/elf.h linux-3.0.4/arch/x86/include/asm/elf.h
7092--- linux-3.0.4/arch/x86/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
7093+++ linux-3.0.4/arch/x86/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
7094@@ -237,7 +237,25 @@ extern int force_personality32;
7095 the loader. We need to make sure that it is out of the way of the program
7096 that it will "exec", and that there is sufficient room for the brk. */
7097
7098+#ifdef CONFIG_PAX_SEGMEXEC
7099+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
7100+#else
7101 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7102+#endif
7103+
7104+#ifdef CONFIG_PAX_ASLR
7105+#ifdef CONFIG_X86_32
7106+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
7107+
7108+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7109+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7110+#else
7111+#define PAX_ELF_ET_DYN_BASE 0x400000UL
7112+
7113+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7114+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7115+#endif
7116+#endif
7117
7118 /* This yields a mask that user programs can use to figure out what
7119 instruction set this CPU supports. This could be done in user space,
7120@@ -290,9 +308,7 @@ do { \
7121
7122 #define ARCH_DLINFO \
7123 do { \
7124- if (vdso_enabled) \
7125- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
7126- (unsigned long)current->mm->context.vdso); \
7127+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
7128 } while (0)
7129
7130 #define AT_SYSINFO 32
7131@@ -303,7 +319,7 @@ do { \
7132
7133 #endif /* !CONFIG_X86_32 */
7134
7135-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
7136+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
7137
7138 #define VDSO_ENTRY \
7139 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
7140@@ -317,7 +333,4 @@ extern int arch_setup_additional_pages(s
7141 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
7142 #define compat_arch_setup_additional_pages syscall32_setup_pages
7143
7144-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7145-#define arch_randomize_brk arch_randomize_brk
7146-
7147 #endif /* _ASM_X86_ELF_H */
7148diff -urNp linux-3.0.4/arch/x86/include/asm/emergency-restart.h linux-3.0.4/arch/x86/include/asm/emergency-restart.h
7149--- linux-3.0.4/arch/x86/include/asm/emergency-restart.h 2011-07-21 22:17:23.000000000 -0400
7150+++ linux-3.0.4/arch/x86/include/asm/emergency-restart.h 2011-08-23 21:47:55.000000000 -0400
7151@@ -15,6 +15,6 @@ enum reboot_type {
7152
7153 extern enum reboot_type reboot_type;
7154
7155-extern void machine_emergency_restart(void);
7156+extern void machine_emergency_restart(void) __noreturn;
7157
7158 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
7159diff -urNp linux-3.0.4/arch/x86/include/asm/futex.h linux-3.0.4/arch/x86/include/asm/futex.h
7160--- linux-3.0.4/arch/x86/include/asm/futex.h 2011-07-21 22:17:23.000000000 -0400
7161+++ linux-3.0.4/arch/x86/include/asm/futex.h 2011-08-23 21:47:55.000000000 -0400
7162@@ -12,16 +12,18 @@
7163 #include <asm/system.h>
7164
7165 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
7166+ typecheck(u32 *, uaddr); \
7167 asm volatile("1:\t" insn "\n" \
7168 "2:\t.section .fixup,\"ax\"\n" \
7169 "3:\tmov\t%3, %1\n" \
7170 "\tjmp\t2b\n" \
7171 "\t.previous\n" \
7172 _ASM_EXTABLE(1b, 3b) \
7173- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7174+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
7175 : "i" (-EFAULT), "0" (oparg), "1" (0))
7176
7177 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
7178+ typecheck(u32 *, uaddr); \
7179 asm volatile("1:\tmovl %2, %0\n" \
7180 "\tmovl\t%0, %3\n" \
7181 "\t" insn "\n" \
7182@@ -34,7 +36,7 @@
7183 _ASM_EXTABLE(1b, 4b) \
7184 _ASM_EXTABLE(2b, 4b) \
7185 : "=&a" (oldval), "=&r" (ret), \
7186- "+m" (*uaddr), "=&r" (tem) \
7187+ "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
7188 : "r" (oparg), "i" (-EFAULT), "1" (0))
7189
7190 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
7191@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
7192
7193 switch (op) {
7194 case FUTEX_OP_SET:
7195- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
7196+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
7197 break;
7198 case FUTEX_OP_ADD:
7199- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
7200+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
7201 uaddr, oparg);
7202 break;
7203 case FUTEX_OP_OR:
7204@@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_i
7205 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
7206 return -EFAULT;
7207
7208- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
7209+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
7210 "2:\t.section .fixup, \"ax\"\n"
7211 "3:\tmov %3, %0\n"
7212 "\tjmp 2b\n"
7213 "\t.previous\n"
7214 _ASM_EXTABLE(1b, 3b)
7215- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
7216+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
7217 : "i" (-EFAULT), "r" (newval), "1" (oldval)
7218 : "memory"
7219 );
7220diff -urNp linux-3.0.4/arch/x86/include/asm/hw_irq.h linux-3.0.4/arch/x86/include/asm/hw_irq.h
7221--- linux-3.0.4/arch/x86/include/asm/hw_irq.h 2011-07-21 22:17:23.000000000 -0400
7222+++ linux-3.0.4/arch/x86/include/asm/hw_irq.h 2011-08-23 21:47:55.000000000 -0400
7223@@ -137,8 +137,8 @@ extern void setup_ioapic_dest(void);
7224 extern void enable_IO_APIC(void);
7225
7226 /* Statistics */
7227-extern atomic_t irq_err_count;
7228-extern atomic_t irq_mis_count;
7229+extern atomic_unchecked_t irq_err_count;
7230+extern atomic_unchecked_t irq_mis_count;
7231
7232 /* EISA */
7233 extern void eisa_set_level_irq(unsigned int irq);
7234diff -urNp linux-3.0.4/arch/x86/include/asm/i387.h linux-3.0.4/arch/x86/include/asm/i387.h
7235--- linux-3.0.4/arch/x86/include/asm/i387.h 2011-07-21 22:17:23.000000000 -0400
7236+++ linux-3.0.4/arch/x86/include/asm/i387.h 2011-08-23 21:47:55.000000000 -0400
7237@@ -92,6 +92,11 @@ static inline int fxrstor_checking(struc
7238 {
7239 int err;
7240
7241+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7242+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7243+ fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
7244+#endif
7245+
7246 /* See comment in fxsave() below. */
7247 #ifdef CONFIG_AS_FXSAVEQ
7248 asm volatile("1: fxrstorq %[fx]\n\t"
7249@@ -121,6 +126,11 @@ static inline int fxsave_user(struct i38
7250 {
7251 int err;
7252
7253+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7254+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7255+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
7256+#endif
7257+
7258 /*
7259 * Clear the bytes not touched by the fxsave and reserved
7260 * for the SW usage.
7261@@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu
7262 #endif /* CONFIG_X86_64 */
7263
7264 /* We need a safe address that is cheap to find and that is already
7265- in L1 during context switch. The best choices are unfortunately
7266- different for UP and SMP */
7267-#ifdef CONFIG_SMP
7268-#define safe_address (__per_cpu_offset[0])
7269-#else
7270-#define safe_address (kstat_cpu(0).cpustat.user)
7271-#endif
7272+ in L1 during context switch. */
7273+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
7274
7275 /*
7276 * These must be called with preempt disabled
7277@@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void
7278 struct thread_info *me = current_thread_info();
7279 preempt_disable();
7280 if (me->status & TS_USEDFPU)
7281- __save_init_fpu(me->task);
7282+ __save_init_fpu(current);
7283 else
7284 clts();
7285 }
7286diff -urNp linux-3.0.4/arch/x86/include/asm/io.h linux-3.0.4/arch/x86/include/asm/io.h
7287--- linux-3.0.4/arch/x86/include/asm/io.h 2011-07-21 22:17:23.000000000 -0400
7288+++ linux-3.0.4/arch/x86/include/asm/io.h 2011-08-23 21:47:55.000000000 -0400
7289@@ -196,6 +196,17 @@ extern void set_iounmap_nonlazy(void);
7290
7291 #include <linux/vmalloc.h>
7292
7293+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
7294+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
7295+{
7296+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7297+}
7298+
7299+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
7300+{
7301+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7302+}
7303+
7304 /*
7305 * Convert a virtual cached pointer to an uncached pointer
7306 */
7307diff -urNp linux-3.0.4/arch/x86/include/asm/irqflags.h linux-3.0.4/arch/x86/include/asm/irqflags.h
7308--- linux-3.0.4/arch/x86/include/asm/irqflags.h 2011-07-21 22:17:23.000000000 -0400
7309+++ linux-3.0.4/arch/x86/include/asm/irqflags.h 2011-08-23 21:47:55.000000000 -0400
7310@@ -140,6 +140,11 @@ static inline unsigned long arch_local_i
7311 sti; \
7312 sysexit
7313
7314+#define GET_CR0_INTO_RDI mov %cr0, %rdi
7315+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
7316+#define GET_CR3_INTO_RDI mov %cr3, %rdi
7317+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
7318+
7319 #else
7320 #define INTERRUPT_RETURN iret
7321 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
7322diff -urNp linux-3.0.4/arch/x86/include/asm/kprobes.h linux-3.0.4/arch/x86/include/asm/kprobes.h
7323--- linux-3.0.4/arch/x86/include/asm/kprobes.h 2011-07-21 22:17:23.000000000 -0400
7324+++ linux-3.0.4/arch/x86/include/asm/kprobes.h 2011-08-23 21:47:55.000000000 -0400
7325@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
7326 #define RELATIVEJUMP_SIZE 5
7327 #define RELATIVECALL_OPCODE 0xe8
7328 #define RELATIVE_ADDR_SIZE 4
7329-#define MAX_STACK_SIZE 64
7330-#define MIN_STACK_SIZE(ADDR) \
7331- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
7332- THREAD_SIZE - (unsigned long)(ADDR))) \
7333- ? (MAX_STACK_SIZE) \
7334- : (((unsigned long)current_thread_info()) + \
7335- THREAD_SIZE - (unsigned long)(ADDR)))
7336+#define MAX_STACK_SIZE 64UL
7337+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
7338
7339 #define flush_insn_slot(p) do { } while (0)
7340
7341diff -urNp linux-3.0.4/arch/x86/include/asm/kvm_host.h linux-3.0.4/arch/x86/include/asm/kvm_host.h
7342--- linux-3.0.4/arch/x86/include/asm/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
7343+++ linux-3.0.4/arch/x86/include/asm/kvm_host.h 2011-08-26 19:49:56.000000000 -0400
7344@@ -441,7 +441,7 @@ struct kvm_arch {
7345 unsigned int n_used_mmu_pages;
7346 unsigned int n_requested_mmu_pages;
7347 unsigned int n_max_mmu_pages;
7348- atomic_t invlpg_counter;
7349+ atomic_unchecked_t invlpg_counter;
7350 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
7351 /*
7352 * Hash table of struct kvm_mmu_page.
7353@@ -619,7 +619,7 @@ struct kvm_x86_ops {
7354 enum x86_intercept_stage stage);
7355
7356 const struct trace_print_flags *exit_reasons_str;
7357-};
7358+} __do_const;
7359
7360 struct kvm_arch_async_pf {
7361 u32 token;
7362diff -urNp linux-3.0.4/arch/x86/include/asm/local.h linux-3.0.4/arch/x86/include/asm/local.h
7363--- linux-3.0.4/arch/x86/include/asm/local.h 2011-07-21 22:17:23.000000000 -0400
7364+++ linux-3.0.4/arch/x86/include/asm/local.h 2011-08-23 21:47:55.000000000 -0400
7365@@ -18,26 +18,58 @@ typedef struct {
7366
7367 static inline void local_inc(local_t *l)
7368 {
7369- asm volatile(_ASM_INC "%0"
7370+ asm volatile(_ASM_INC "%0\n"
7371+
7372+#ifdef CONFIG_PAX_REFCOUNT
7373+ "jno 0f\n"
7374+ _ASM_DEC "%0\n"
7375+ "int $4\n0:\n"
7376+ _ASM_EXTABLE(0b, 0b)
7377+#endif
7378+
7379 : "+m" (l->a.counter));
7380 }
7381
7382 static inline void local_dec(local_t *l)
7383 {
7384- asm volatile(_ASM_DEC "%0"
7385+ asm volatile(_ASM_DEC "%0\n"
7386+
7387+#ifdef CONFIG_PAX_REFCOUNT
7388+ "jno 0f\n"
7389+ _ASM_INC "%0\n"
7390+ "int $4\n0:\n"
7391+ _ASM_EXTABLE(0b, 0b)
7392+#endif
7393+
7394 : "+m" (l->a.counter));
7395 }
7396
7397 static inline void local_add(long i, local_t *l)
7398 {
7399- asm volatile(_ASM_ADD "%1,%0"
7400+ asm volatile(_ASM_ADD "%1,%0\n"
7401+
7402+#ifdef CONFIG_PAX_REFCOUNT
7403+ "jno 0f\n"
7404+ _ASM_SUB "%1,%0\n"
7405+ "int $4\n0:\n"
7406+ _ASM_EXTABLE(0b, 0b)
7407+#endif
7408+
7409 : "+m" (l->a.counter)
7410 : "ir" (i));
7411 }
7412
7413 static inline void local_sub(long i, local_t *l)
7414 {
7415- asm volatile(_ASM_SUB "%1,%0"
7416+ asm volatile(_ASM_SUB "%1,%0\n"
7417+
7418+#ifdef CONFIG_PAX_REFCOUNT
7419+ "jno 0f\n"
7420+ _ASM_ADD "%1,%0\n"
7421+ "int $4\n0:\n"
7422+ _ASM_EXTABLE(0b, 0b)
7423+#endif
7424+
7425 : "+m" (l->a.counter)
7426 : "ir" (i));
7427 }
7428@@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
7429 {
7430 unsigned char c;
7431
7432- asm volatile(_ASM_SUB "%2,%0; sete %1"
7433+ asm volatile(_ASM_SUB "%2,%0\n"
7434+
7435+#ifdef CONFIG_PAX_REFCOUNT
7436+ "jno 0f\n"
7437+ _ASM_ADD "%2,%0\n"
7438+ "int $4\n0:\n"
7439+ _ASM_EXTABLE(0b, 0b)
7440+#endif
7441+
7442+ "sete %1\n"
7443 : "+m" (l->a.counter), "=qm" (c)
7444 : "ir" (i) : "memory");
7445 return c;
7446@@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
7447 {
7448 unsigned char c;
7449
7450- asm volatile(_ASM_DEC "%0; sete %1"
7451+ asm volatile(_ASM_DEC "%0\n"
7452+
7453+#ifdef CONFIG_PAX_REFCOUNT
7454+ "jno 0f\n"
7455+ _ASM_INC "%0\n"
7456+ "int $4\n0:\n"
7457+ _ASM_EXTABLE(0b, 0b)
7458+#endif
7459+
7460+ "sete %1\n"
7461 : "+m" (l->a.counter), "=qm" (c)
7462 : : "memory");
7463 return c != 0;
7464@@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
7465 {
7466 unsigned char c;
7467
7468- asm volatile(_ASM_INC "%0; sete %1"
7469+ asm volatile(_ASM_INC "%0\n"
7470+
7471+#ifdef CONFIG_PAX_REFCOUNT
7472+ "jno 0f\n"
7473+ _ASM_DEC "%0\n"
7474+ "int $4\n0:\n"
7475+ _ASM_EXTABLE(0b, 0b)
7476+#endif
7477+
7478+ "sete %1\n"
7479 : "+m" (l->a.counter), "=qm" (c)
7480 : : "memory");
7481 return c != 0;
7482@@ -110,7 +169,16 @@ static inline int local_add_negative(lon
7483 {
7484 unsigned char c;
7485
7486- asm volatile(_ASM_ADD "%2,%0; sets %1"
7487+ asm volatile(_ASM_ADD "%2,%0\n"
7488+
7489+#ifdef CONFIG_PAX_REFCOUNT
7490+ "jno 0f\n"
7491+ _ASM_SUB "%2,%0\n"
7492+ "int $4\n0:\n"
7493+ _ASM_EXTABLE(0b, 0b)
7494+#endif
7495+
7496+ "sets %1\n"
7497 : "+m" (l->a.counter), "=qm" (c)
7498 : "ir" (i) : "memory");
7499 return c;
7500@@ -133,7 +201,15 @@ static inline long local_add_return(long
7501 #endif
7502 /* Modern 486+ processor */
7503 __i = i;
7504- asm volatile(_ASM_XADD "%0, %1;"
7505+ asm volatile(_ASM_XADD "%0, %1\n"
7506+
7507+#ifdef CONFIG_PAX_REFCOUNT
7508+ "jno 0f\n"
7509+ _ASM_MOV "%0,%1\n"
7510+ "int $4\n0:\n"
7511+ _ASM_EXTABLE(0b, 0b)
7512+#endif
7513+
7514 : "+r" (i), "+m" (l->a.counter)
7515 : : "memory");
7516 return i + __i;
7517diff -urNp linux-3.0.4/arch/x86/include/asm/mman.h linux-3.0.4/arch/x86/include/asm/mman.h
7518--- linux-3.0.4/arch/x86/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
7519+++ linux-3.0.4/arch/x86/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
7520@@ -5,4 +5,14 @@
7521
7522 #include <asm-generic/mman.h>
7523
7524+#ifdef __KERNEL__
7525+#ifndef __ASSEMBLY__
7526+#ifdef CONFIG_X86_32
7527+#define arch_mmap_check i386_mmap_check
7528+int i386_mmap_check(unsigned long addr, unsigned long len,
7529+ unsigned long flags);
7530+#endif
7531+#endif
7532+#endif
7533+
7534 #endif /* _ASM_X86_MMAN_H */
7535diff -urNp linux-3.0.4/arch/x86/include/asm/mmu_context.h linux-3.0.4/arch/x86/include/asm/mmu_context.h
7536--- linux-3.0.4/arch/x86/include/asm/mmu_context.h 2011-07-21 22:17:23.000000000 -0400
7537+++ linux-3.0.4/arch/x86/include/asm/mmu_context.h 2011-08-23 21:48:14.000000000 -0400
7538@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *m
7539
7540 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7541 {
7542+
7543+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7544+ unsigned int i;
7545+ pgd_t *pgd;
7546+
7547+ pax_open_kernel();
7548+ pgd = get_cpu_pgd(smp_processor_id());
7549+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
7550+ set_pgd_batched(pgd+i, native_make_pgd(0));
7551+ pax_close_kernel();
7552+#endif
7553+
7554 #ifdef CONFIG_SMP
7555 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
7556 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
7557@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_s
7558 struct task_struct *tsk)
7559 {
7560 unsigned cpu = smp_processor_id();
7561+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7562+ int tlbstate = TLBSTATE_OK;
7563+#endif
7564
7565 if (likely(prev != next)) {
7566 #ifdef CONFIG_SMP
7567+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7568+ tlbstate = percpu_read(cpu_tlbstate.state);
7569+#endif
7570 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7571 percpu_write(cpu_tlbstate.active_mm, next);
7572 #endif
7573 cpumask_set_cpu(cpu, mm_cpumask(next));
7574
7575 /* Re-load page tables */
7576+#ifdef CONFIG_PAX_PER_CPU_PGD
7577+ pax_open_kernel();
7578+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7579+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7580+ pax_close_kernel();
7581+ load_cr3(get_cpu_pgd(cpu));
7582+#else
7583 load_cr3(next->pgd);
7584+#endif
7585
7586 /* stop flush ipis for the previous mm */
7587 cpumask_clear_cpu(cpu, mm_cpumask(prev));
7588@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_s
7589 */
7590 if (unlikely(prev->context.ldt != next->context.ldt))
7591 load_LDT_nolock(&next->context);
7592- }
7593+
7594+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7595+ if (!(__supported_pte_mask & _PAGE_NX)) {
7596+ smp_mb__before_clear_bit();
7597+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
7598+ smp_mb__after_clear_bit();
7599+ cpu_set(cpu, next->context.cpu_user_cs_mask);
7600+ }
7601+#endif
7602+
7603+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7604+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
7605+ prev->context.user_cs_limit != next->context.user_cs_limit))
7606+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7607 #ifdef CONFIG_SMP
7608+ else if (unlikely(tlbstate != TLBSTATE_OK))
7609+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7610+#endif
7611+#endif
7612+
7613+ }
7614 else {
7615+
7616+#ifdef CONFIG_PAX_PER_CPU_PGD
7617+ pax_open_kernel();
7618+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7619+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7620+ pax_close_kernel();
7621+ load_cr3(get_cpu_pgd(cpu));
7622+#endif
7623+
7624+#ifdef CONFIG_SMP
7625 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7626 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
7627
7628@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_s
7629 * tlb flush IPI delivery. We must reload CR3
7630 * to make sure to use no freed page tables.
7631 */
7632+
7633+#ifndef CONFIG_PAX_PER_CPU_PGD
7634 load_cr3(next->pgd);
7635+#endif
7636+
7637 load_LDT_nolock(&next->context);
7638+
7639+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
7640+ if (!(__supported_pte_mask & _PAGE_NX))
7641+ cpu_set(cpu, next->context.cpu_user_cs_mask);
7642+#endif
7643+
7644+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7645+#ifdef CONFIG_PAX_PAGEEXEC
7646+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
7647+#endif
7648+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7649+#endif
7650+
7651 }
7652- }
7653 #endif
7654+ }
7655 }
7656
7657 #define activate_mm(prev, next) \
7658diff -urNp linux-3.0.4/arch/x86/include/asm/mmu.h linux-3.0.4/arch/x86/include/asm/mmu.h
7659--- linux-3.0.4/arch/x86/include/asm/mmu.h 2011-07-21 22:17:23.000000000 -0400
7660+++ linux-3.0.4/arch/x86/include/asm/mmu.h 2011-08-23 21:47:55.000000000 -0400
7661@@ -9,7 +9,7 @@
7662 * we put the segment information here.
7663 */
7664 typedef struct {
7665- void *ldt;
7666+ struct desc_struct *ldt;
7667 int size;
7668
7669 #ifdef CONFIG_X86_64
7670@@ -18,7 +18,19 @@ typedef struct {
7671 #endif
7672
7673 struct mutex lock;
7674- void *vdso;
7675+ unsigned long vdso;
7676+
7677+#ifdef CONFIG_X86_32
7678+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
7679+ unsigned long user_cs_base;
7680+ unsigned long user_cs_limit;
7681+
7682+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7683+ cpumask_t cpu_user_cs_mask;
7684+#endif
7685+
7686+#endif
7687+#endif
7688 } mm_context_t;
7689
7690 #ifdef CONFIG_SMP
7691diff -urNp linux-3.0.4/arch/x86/include/asm/module.h linux-3.0.4/arch/x86/include/asm/module.h
7692--- linux-3.0.4/arch/x86/include/asm/module.h 2011-07-21 22:17:23.000000000 -0400
7693+++ linux-3.0.4/arch/x86/include/asm/module.h 2011-08-23 21:48:14.000000000 -0400
7694@@ -5,6 +5,7 @@
7695
7696 #ifdef CONFIG_X86_64
7697 /* X86_64 does not define MODULE_PROC_FAMILY */
7698+#define MODULE_PROC_FAMILY ""
7699 #elif defined CONFIG_M386
7700 #define MODULE_PROC_FAMILY "386 "
7701 #elif defined CONFIG_M486
7702@@ -59,8 +60,30 @@
7703 #error unknown processor family
7704 #endif
7705
7706-#ifdef CONFIG_X86_32
7707-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
7708+#ifdef CONFIG_PAX_MEMORY_UDEREF
7709+#define MODULE_PAX_UDEREF "UDEREF "
7710+#else
7711+#define MODULE_PAX_UDEREF ""
7712+#endif
7713+
7714+#ifdef CONFIG_PAX_KERNEXEC
7715+#define MODULE_PAX_KERNEXEC "KERNEXEC "
7716+#else
7717+#define MODULE_PAX_KERNEXEC ""
7718 #endif
7719
7720+#ifdef CONFIG_PAX_REFCOUNT
7721+#define MODULE_PAX_REFCOUNT "REFCOUNT "
7722+#else
7723+#define MODULE_PAX_REFCOUNT ""
7724+#endif
7725+
7726+#ifdef CONFIG_GRKERNSEC
7727+#define MODULE_GRSEC "GRSECURITY "
7728+#else
7729+#define MODULE_GRSEC ""
7730+#endif
7731+
7732+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
7733+
7734 #endif /* _ASM_X86_MODULE_H */
7735diff -urNp linux-3.0.4/arch/x86/include/asm/page_64_types.h linux-3.0.4/arch/x86/include/asm/page_64_types.h
7736--- linux-3.0.4/arch/x86/include/asm/page_64_types.h 2011-07-21 22:17:23.000000000 -0400
7737+++ linux-3.0.4/arch/x86/include/asm/page_64_types.h 2011-08-23 21:47:55.000000000 -0400
7738@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
7739
7740 /* duplicated to the one in bootmem.h */
7741 extern unsigned long max_pfn;
7742-extern unsigned long phys_base;
7743+extern const unsigned long phys_base;
7744
7745 extern unsigned long __phys_addr(unsigned long);
7746 #define __phys_reloc_hide(x) (x)
7747diff -urNp linux-3.0.4/arch/x86/include/asm/paravirt.h linux-3.0.4/arch/x86/include/asm/paravirt.h
7748--- linux-3.0.4/arch/x86/include/asm/paravirt.h 2011-07-21 22:17:23.000000000 -0400
7749+++ linux-3.0.4/arch/x86/include/asm/paravirt.h 2011-08-23 21:47:55.000000000 -0400
7750@@ -658,6 +658,18 @@ static inline void set_pgd(pgd_t *pgdp,
7751 val);
7752 }
7753
7754+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
7755+{
7756+ pgdval_t val = native_pgd_val(pgd);
7757+
7758+ if (sizeof(pgdval_t) > sizeof(long))
7759+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
7760+ val, (u64)val >> 32);
7761+ else
7762+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
7763+ val);
7764+}
7765+
7766 static inline void pgd_clear(pgd_t *pgdp)
7767 {
7768 set_pgd(pgdp, __pgd(0));
7769@@ -739,6 +751,21 @@ static inline void __set_fixmap(unsigned
7770 pv_mmu_ops.set_fixmap(idx, phys, flags);
7771 }
7772
7773+#ifdef CONFIG_PAX_KERNEXEC
7774+static inline unsigned long pax_open_kernel(void)
7775+{
7776+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
7777+}
7778+
7779+static inline unsigned long pax_close_kernel(void)
7780+{
7781+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
7782+}
7783+#else
7784+static inline unsigned long pax_open_kernel(void) { return 0; }
7785+static inline unsigned long pax_close_kernel(void) { return 0; }
7786+#endif
7787+
7788 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
7789
7790 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
7791@@ -955,7 +982,7 @@ extern void default_banner(void);
7792
7793 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
7794 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
7795-#define PARA_INDIRECT(addr) *%cs:addr
7796+#define PARA_INDIRECT(addr) *%ss:addr
7797 #endif
7798
7799 #define INTERRUPT_RETURN \
7800@@ -1032,6 +1059,21 @@ extern void default_banner(void);
7801 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
7802 CLBR_NONE, \
7803 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
7804+
7805+#define GET_CR0_INTO_RDI \
7806+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
7807+ mov %rax,%rdi
7808+
7809+#define SET_RDI_INTO_CR0 \
7810+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
7811+
7812+#define GET_CR3_INTO_RDI \
7813+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
7814+ mov %rax,%rdi
7815+
7816+#define SET_RDI_INTO_CR3 \
7817+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
7818+
7819 #endif /* CONFIG_X86_32 */
7820
7821 #endif /* __ASSEMBLY__ */
7822diff -urNp linux-3.0.4/arch/x86/include/asm/paravirt_types.h linux-3.0.4/arch/x86/include/asm/paravirt_types.h
7823--- linux-3.0.4/arch/x86/include/asm/paravirt_types.h 2011-07-21 22:17:23.000000000 -0400
7824+++ linux-3.0.4/arch/x86/include/asm/paravirt_types.h 2011-08-23 21:47:55.000000000 -0400
7825@@ -78,19 +78,19 @@ struct pv_init_ops {
7826 */
7827 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
7828 unsigned long addr, unsigned len);
7829-};
7830+} __no_const;
7831
7832
7833 struct pv_lazy_ops {
7834 /* Set deferred update mode, used for batching operations. */
7835 void (*enter)(void);
7836 void (*leave)(void);
7837-};
7838+} __no_const;
7839
7840 struct pv_time_ops {
7841 unsigned long long (*sched_clock)(void);
7842 unsigned long (*get_tsc_khz)(void);
7843-};
7844+} __no_const;
7845
7846 struct pv_cpu_ops {
7847 /* hooks for various privileged instructions */
7848@@ -186,7 +186,7 @@ struct pv_cpu_ops {
7849
7850 void (*start_context_switch)(struct task_struct *prev);
7851 void (*end_context_switch)(struct task_struct *next);
7852-};
7853+} __no_const;
7854
7855 struct pv_irq_ops {
7856 /*
7857@@ -217,7 +217,7 @@ struct pv_apic_ops {
7858 unsigned long start_eip,
7859 unsigned long start_esp);
7860 #endif
7861-};
7862+} __no_const;
7863
7864 struct pv_mmu_ops {
7865 unsigned long (*read_cr2)(void);
7866@@ -306,6 +306,7 @@ struct pv_mmu_ops {
7867 struct paravirt_callee_save make_pud;
7868
7869 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
7870+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
7871 #endif /* PAGETABLE_LEVELS == 4 */
7872 #endif /* PAGETABLE_LEVELS >= 3 */
7873
7874@@ -317,6 +318,12 @@ struct pv_mmu_ops {
7875 an mfn. We can tell which is which from the index. */
7876 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
7877 phys_addr_t phys, pgprot_t flags);
7878+
7879+#ifdef CONFIG_PAX_KERNEXEC
7880+ unsigned long (*pax_open_kernel)(void);
7881+ unsigned long (*pax_close_kernel)(void);
7882+#endif
7883+
7884 };
7885
7886 struct arch_spinlock;
7887@@ -327,7 +334,7 @@ struct pv_lock_ops {
7888 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
7889 int (*spin_trylock)(struct arch_spinlock *lock);
7890 void (*spin_unlock)(struct arch_spinlock *lock);
7891-};
7892+} __no_const;
7893
7894 /* This contains all the paravirt structures: we get a convenient
7895 * number for each function using the offset which we use to indicate
7896diff -urNp linux-3.0.4/arch/x86/include/asm/pgalloc.h linux-3.0.4/arch/x86/include/asm/pgalloc.h
7897--- linux-3.0.4/arch/x86/include/asm/pgalloc.h 2011-07-21 22:17:23.000000000 -0400
7898+++ linux-3.0.4/arch/x86/include/asm/pgalloc.h 2011-08-23 21:47:55.000000000 -0400
7899@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
7900 pmd_t *pmd, pte_t *pte)
7901 {
7902 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7903+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
7904+}
7905+
7906+static inline void pmd_populate_user(struct mm_struct *mm,
7907+ pmd_t *pmd, pte_t *pte)
7908+{
7909+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7910 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
7911 }
7912
7913diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable-2level.h linux-3.0.4/arch/x86/include/asm/pgtable-2level.h
7914--- linux-3.0.4/arch/x86/include/asm/pgtable-2level.h 2011-07-21 22:17:23.000000000 -0400
7915+++ linux-3.0.4/arch/x86/include/asm/pgtable-2level.h 2011-08-23 21:47:55.000000000 -0400
7916@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
7917
7918 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
7919 {
7920+ pax_open_kernel();
7921 *pmdp = pmd;
7922+ pax_close_kernel();
7923 }
7924
7925 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
7926diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_32.h linux-3.0.4/arch/x86/include/asm/pgtable_32.h
7927--- linux-3.0.4/arch/x86/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
7928+++ linux-3.0.4/arch/x86/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
7929@@ -25,9 +25,6 @@
7930 struct mm_struct;
7931 struct vm_area_struct;
7932
7933-extern pgd_t swapper_pg_dir[1024];
7934-extern pgd_t initial_page_table[1024];
7935-
7936 static inline void pgtable_cache_init(void) { }
7937 static inline void check_pgt_cache(void) { }
7938 void paging_init(void);
7939@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
7940 # include <asm/pgtable-2level.h>
7941 #endif
7942
7943+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
7944+extern pgd_t initial_page_table[PTRS_PER_PGD];
7945+#ifdef CONFIG_X86_PAE
7946+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
7947+#endif
7948+
7949 #if defined(CONFIG_HIGHPTE)
7950 #define pte_offset_map(dir, address) \
7951 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
7952@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u
7953 /* Clear a kernel PTE and flush it from the TLB */
7954 #define kpte_clear_flush(ptep, vaddr) \
7955 do { \
7956+ pax_open_kernel(); \
7957 pte_clear(&init_mm, (vaddr), (ptep)); \
7958+ pax_close_kernel(); \
7959 __flush_tlb_one((vaddr)); \
7960 } while (0)
7961
7962@@ -74,6 +79,9 @@ do { \
7963
7964 #endif /* !__ASSEMBLY__ */
7965
7966+#define HAVE_ARCH_UNMAPPED_AREA
7967+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
7968+
7969 /*
7970 * kern_addr_valid() is (1) for FLATMEM and (0) for
7971 * SPARSEMEM and DISCONTIGMEM
7972diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h
7973--- linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h 2011-07-21 22:17:23.000000000 -0400
7974+++ linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h 2011-08-23 21:47:55.000000000 -0400
7975@@ -8,7 +8,7 @@
7976 */
7977 #ifdef CONFIG_X86_PAE
7978 # include <asm/pgtable-3level_types.h>
7979-# define PMD_SIZE (1UL << PMD_SHIFT)
7980+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
7981 # define PMD_MASK (~(PMD_SIZE - 1))
7982 #else
7983 # include <asm/pgtable-2level_types.h>
7984@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
7985 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
7986 #endif
7987
7988+#ifdef CONFIG_PAX_KERNEXEC
7989+#ifndef __ASSEMBLY__
7990+extern unsigned char MODULES_EXEC_VADDR[];
7991+extern unsigned char MODULES_EXEC_END[];
7992+#endif
7993+#include <asm/boot.h>
7994+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
7995+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
7996+#else
7997+#define ktla_ktva(addr) (addr)
7998+#define ktva_ktla(addr) (addr)
7999+#endif
8000+
8001 #define MODULES_VADDR VMALLOC_START
8002 #define MODULES_END VMALLOC_END
8003 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
8004diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable-3level.h linux-3.0.4/arch/x86/include/asm/pgtable-3level.h
8005--- linux-3.0.4/arch/x86/include/asm/pgtable-3level.h 2011-07-21 22:17:23.000000000 -0400
8006+++ linux-3.0.4/arch/x86/include/asm/pgtable-3level.h 2011-08-23 21:47:55.000000000 -0400
8007@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
8008
8009 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8010 {
8011+ pax_open_kernel();
8012 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
8013+ pax_close_kernel();
8014 }
8015
8016 static inline void native_set_pud(pud_t *pudp, pud_t pud)
8017 {
8018+ pax_open_kernel();
8019 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
8020+ pax_close_kernel();
8021 }
8022
8023 /*
8024diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_64.h linux-3.0.4/arch/x86/include/asm/pgtable_64.h
8025--- linux-3.0.4/arch/x86/include/asm/pgtable_64.h 2011-07-21 22:17:23.000000000 -0400
8026+++ linux-3.0.4/arch/x86/include/asm/pgtable_64.h 2011-08-23 21:47:55.000000000 -0400
8027@@ -16,10 +16,13 @@
8028
8029 extern pud_t level3_kernel_pgt[512];
8030 extern pud_t level3_ident_pgt[512];
8031+extern pud_t level3_vmalloc_pgt[512];
8032+extern pud_t level3_vmemmap_pgt[512];
8033+extern pud_t level2_vmemmap_pgt[512];
8034 extern pmd_t level2_kernel_pgt[512];
8035 extern pmd_t level2_fixmap_pgt[512];
8036-extern pmd_t level2_ident_pgt[512];
8037-extern pgd_t init_level4_pgt[];
8038+extern pmd_t level2_ident_pgt[512*2];
8039+extern pgd_t init_level4_pgt[512];
8040
8041 #define swapper_pg_dir init_level4_pgt
8042
8043@@ -61,7 +64,9 @@ static inline void native_set_pte_atomic
8044
8045 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8046 {
8047+ pax_open_kernel();
8048 *pmdp = pmd;
8049+ pax_close_kernel();
8050 }
8051
8052 static inline void native_pmd_clear(pmd_t *pmd)
8053@@ -107,6 +112,13 @@ static inline void native_pud_clear(pud_
8054
8055 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
8056 {
8057+ pax_open_kernel();
8058+ *pgdp = pgd;
8059+ pax_close_kernel();
8060+}
8061+
8062+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
8063+{
8064 *pgdp = pgd;
8065 }
8066
8067diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h
8068--- linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h 2011-07-21 22:17:23.000000000 -0400
8069+++ linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h 2011-08-23 21:47:55.000000000 -0400
8070@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
8071 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
8072 #define MODULES_END _AC(0xffffffffff000000, UL)
8073 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
8074+#define MODULES_EXEC_VADDR MODULES_VADDR
8075+#define MODULES_EXEC_END MODULES_END
8076+
8077+#define ktla_ktva(addr) (addr)
8078+#define ktva_ktla(addr) (addr)
8079
8080 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
8081diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable.h linux-3.0.4/arch/x86/include/asm/pgtable.h
8082--- linux-3.0.4/arch/x86/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
8083+++ linux-3.0.4/arch/x86/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
8084@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm
8085
8086 #ifndef __PAGETABLE_PUD_FOLDED
8087 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
8088+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
8089 #define pgd_clear(pgd) native_pgd_clear(pgd)
8090 #endif
8091
8092@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm
8093
8094 #define arch_end_context_switch(prev) do {} while(0)
8095
8096+#define pax_open_kernel() native_pax_open_kernel()
8097+#define pax_close_kernel() native_pax_close_kernel()
8098 #endif /* CONFIG_PARAVIRT */
8099
8100+#define __HAVE_ARCH_PAX_OPEN_KERNEL
8101+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
8102+
8103+#ifdef CONFIG_PAX_KERNEXEC
8104+static inline unsigned long native_pax_open_kernel(void)
8105+{
8106+ unsigned long cr0;
8107+
8108+ preempt_disable();
8109+ barrier();
8110+ cr0 = read_cr0() ^ X86_CR0_WP;
8111+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
8112+ write_cr0(cr0);
8113+ return cr0 ^ X86_CR0_WP;
8114+}
8115+
8116+static inline unsigned long native_pax_close_kernel(void)
8117+{
8118+ unsigned long cr0;
8119+
8120+ cr0 = read_cr0() ^ X86_CR0_WP;
8121+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
8122+ write_cr0(cr0);
8123+ barrier();
8124+ preempt_enable_no_resched();
8125+ return cr0 ^ X86_CR0_WP;
8126+}
8127+#else
8128+static inline unsigned long native_pax_open_kernel(void) { return 0; }
8129+static inline unsigned long native_pax_close_kernel(void) { return 0; }
8130+#endif
8131+
8132 /*
8133 * The following only work if pte_present() is true.
8134 * Undefined behaviour if not..
8135 */
8136+static inline int pte_user(pte_t pte)
8137+{
8138+ return pte_val(pte) & _PAGE_USER;
8139+}
8140+
8141 static inline int pte_dirty(pte_t pte)
8142 {
8143 return pte_flags(pte) & _PAGE_DIRTY;
8144@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t
8145 return pte_clear_flags(pte, _PAGE_RW);
8146 }
8147
8148+static inline pte_t pte_mkread(pte_t pte)
8149+{
8150+ return __pte(pte_val(pte) | _PAGE_USER);
8151+}
8152+
8153 static inline pte_t pte_mkexec(pte_t pte)
8154 {
8155- return pte_clear_flags(pte, _PAGE_NX);
8156+#ifdef CONFIG_X86_PAE
8157+ if (__supported_pte_mask & _PAGE_NX)
8158+ return pte_clear_flags(pte, _PAGE_NX);
8159+ else
8160+#endif
8161+ return pte_set_flags(pte, _PAGE_USER);
8162+}
8163+
8164+static inline pte_t pte_exprotect(pte_t pte)
8165+{
8166+#ifdef CONFIG_X86_PAE
8167+ if (__supported_pte_mask & _PAGE_NX)
8168+ return pte_set_flags(pte, _PAGE_NX);
8169+ else
8170+#endif
8171+ return pte_clear_flags(pte, _PAGE_USER);
8172 }
8173
8174 static inline pte_t pte_mkdirty(pte_t pte)
8175@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long
8176 #endif
8177
8178 #ifndef __ASSEMBLY__
8179+
8180+#ifdef CONFIG_PAX_PER_CPU_PGD
8181+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
8182+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
8183+{
8184+ return cpu_pgd[cpu];
8185+}
8186+#endif
8187+
8188 #include <linux/mm_types.h>
8189
8190 static inline int pte_none(pte_t pte)
8191@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *p
8192
8193 static inline int pgd_bad(pgd_t pgd)
8194 {
8195- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
8196+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
8197 }
8198
8199 static inline int pgd_none(pgd_t pgd)
8200@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
8201 * pgd_offset() returns a (pgd_t *)
8202 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
8203 */
8204-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
8205+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
8206+
8207+#ifdef CONFIG_PAX_PER_CPU_PGD
8208+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
8209+#endif
8210+
8211 /*
8212 * a shortcut which implies the use of the kernel's pgd, instead
8213 * of a process's
8214@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
8215 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
8216 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
8217
8218+#ifdef CONFIG_X86_32
8219+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
8220+#else
8221+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
8222+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
8223+
8224+#ifdef CONFIG_PAX_MEMORY_UDEREF
8225+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
8226+#else
8227+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
8228+#endif
8229+
8230+#endif
8231+
8232 #ifndef __ASSEMBLY__
8233
8234 extern int direct_gbpages;
8235@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(st
8236 * dst and src can be on the same page, but the range must not overlap,
8237 * and must not cross a page boundary.
8238 */
8239-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
8240+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
8241 {
8242- memcpy(dst, src, count * sizeof(pgd_t));
8243+ pax_open_kernel();
8244+ while (count--)
8245+ *dst++ = *src++;
8246+ pax_close_kernel();
8247 }
8248
8249+#ifdef CONFIG_PAX_PER_CPU_PGD
8250+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8251+#endif
8252+
8253+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8254+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8255+#else
8256+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
8257+#endif
8258
8259 #include <asm-generic/pgtable.h>
8260 #endif /* __ASSEMBLY__ */
8261diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_types.h linux-3.0.4/arch/x86/include/asm/pgtable_types.h
8262--- linux-3.0.4/arch/x86/include/asm/pgtable_types.h 2011-07-21 22:17:23.000000000 -0400
8263+++ linux-3.0.4/arch/x86/include/asm/pgtable_types.h 2011-08-23 21:47:55.000000000 -0400
8264@@ -16,13 +16,12 @@
8265 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
8266 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
8267 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
8268-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
8269+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
8270 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
8271 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
8272 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
8273-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
8274-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
8275-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
8276+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
8277+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
8278 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
8279
8280 /* If _PAGE_BIT_PRESENT is clear, we use these: */
8281@@ -40,7 +39,6 @@
8282 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
8283 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
8284 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
8285-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
8286 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
8287 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
8288 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
8289@@ -57,8 +55,10 @@
8290
8291 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
8292 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
8293-#else
8294+#elif defined(CONFIG_KMEMCHECK)
8295 #define _PAGE_NX (_AT(pteval_t, 0))
8296+#else
8297+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
8298 #endif
8299
8300 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
8301@@ -96,6 +96,9 @@
8302 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
8303 _PAGE_ACCESSED)
8304
8305+#define PAGE_READONLY_NOEXEC PAGE_READONLY
8306+#define PAGE_SHARED_NOEXEC PAGE_SHARED
8307+
8308 #define __PAGE_KERNEL_EXEC \
8309 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
8310 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
8311@@ -106,8 +109,8 @@
8312 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
8313 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
8314 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
8315-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
8316-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
8317+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
8318+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
8319 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
8320 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
8321 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
8322@@ -166,8 +169,8 @@
8323 * bits are combined, this will alow user to access the high address mapped
8324 * VDSO in the presence of CONFIG_COMPAT_VDSO
8325 */
8326-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
8327-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
8328+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8329+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8330 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
8331 #endif
8332
8333@@ -205,7 +208,17 @@ static inline pgdval_t pgd_flags(pgd_t p
8334 {
8335 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
8336 }
8337+#endif
8338
8339+#if PAGETABLE_LEVELS == 3
8340+#include <asm-generic/pgtable-nopud.h>
8341+#endif
8342+
8343+#if PAGETABLE_LEVELS == 2
8344+#include <asm-generic/pgtable-nopmd.h>
8345+#endif
8346+
8347+#ifndef __ASSEMBLY__
8348 #if PAGETABLE_LEVELS > 3
8349 typedef struct { pudval_t pud; } pud_t;
8350
8351@@ -219,8 +232,6 @@ static inline pudval_t native_pud_val(pu
8352 return pud.pud;
8353 }
8354 #else
8355-#include <asm-generic/pgtable-nopud.h>
8356-
8357 static inline pudval_t native_pud_val(pud_t pud)
8358 {
8359 return native_pgd_val(pud.pgd);
8360@@ -240,8 +251,6 @@ static inline pmdval_t native_pmd_val(pm
8361 return pmd.pmd;
8362 }
8363 #else
8364-#include <asm-generic/pgtable-nopmd.h>
8365-
8366 static inline pmdval_t native_pmd_val(pmd_t pmd)
8367 {
8368 return native_pgd_val(pmd.pud.pgd);
8369@@ -281,7 +290,6 @@ typedef struct page *pgtable_t;
8370
8371 extern pteval_t __supported_pte_mask;
8372 extern void set_nx(void);
8373-extern int nx_enabled;
8374
8375 #define pgprot_writecombine pgprot_writecombine
8376 extern pgprot_t pgprot_writecombine(pgprot_t prot);
8377diff -urNp linux-3.0.4/arch/x86/include/asm/processor.h linux-3.0.4/arch/x86/include/asm/processor.h
8378--- linux-3.0.4/arch/x86/include/asm/processor.h 2011-07-21 22:17:23.000000000 -0400
8379+++ linux-3.0.4/arch/x86/include/asm/processor.h 2011-08-23 21:47:55.000000000 -0400
8380@@ -266,7 +266,7 @@ struct tss_struct {
8381
8382 } ____cacheline_aligned;
8383
8384-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
8385+extern struct tss_struct init_tss[NR_CPUS];
8386
8387 /*
8388 * Save the original ist values for checking stack pointers during debugging
8389@@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(co
8390 */
8391 #define TASK_SIZE PAGE_OFFSET
8392 #define TASK_SIZE_MAX TASK_SIZE
8393+
8394+#ifdef CONFIG_PAX_SEGMEXEC
8395+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
8396+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
8397+#else
8398 #define STACK_TOP TASK_SIZE
8399-#define STACK_TOP_MAX STACK_TOP
8400+#endif
8401+
8402+#define STACK_TOP_MAX TASK_SIZE
8403
8404 #define INIT_THREAD { \
8405- .sp0 = sizeof(init_stack) + (long)&init_stack, \
8406+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8407 .vm86_info = NULL, \
8408 .sysenter_cs = __KERNEL_CS, \
8409 .io_bitmap_ptr = NULL, \
8410@@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(co
8411 */
8412 #define INIT_TSS { \
8413 .x86_tss = { \
8414- .sp0 = sizeof(init_stack) + (long)&init_stack, \
8415+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8416 .ss0 = __KERNEL_DS, \
8417 .ss1 = __KERNEL_CS, \
8418 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
8419@@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(co
8420 extern unsigned long thread_saved_pc(struct task_struct *tsk);
8421
8422 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
8423-#define KSTK_TOP(info) \
8424-({ \
8425- unsigned long *__ptr = (unsigned long *)(info); \
8426- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
8427-})
8428+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
8429
8430 /*
8431 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
8432@@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(str
8433 #define task_pt_regs(task) \
8434 ({ \
8435 struct pt_regs *__regs__; \
8436- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
8437+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
8438 __regs__ - 1; \
8439 })
8440
8441@@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(str
8442 /*
8443 * User space process size. 47bits minus one guard page.
8444 */
8445-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
8446+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
8447
8448 /* This decides where the kernel will search for a free chunk of vm
8449 * space during mmap's.
8450 */
8451 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
8452- 0xc0000000 : 0xFFFFe000)
8453+ 0xc0000000 : 0xFFFFf000)
8454
8455 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
8456 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
8457@@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(str
8458 #define STACK_TOP_MAX TASK_SIZE_MAX
8459
8460 #define INIT_THREAD { \
8461- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8462+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8463 }
8464
8465 #define INIT_TSS { \
8466- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8467+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8468 }
8469
8470 /*
8471@@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs
8472 */
8473 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
8474
8475+#ifdef CONFIG_PAX_SEGMEXEC
8476+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
8477+#endif
8478+
8479 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
8480
8481 /* Get/set a process' ability to use the timestamp counter instruction */
8482diff -urNp linux-3.0.4/arch/x86/include/asm/ptrace.h linux-3.0.4/arch/x86/include/asm/ptrace.h
8483--- linux-3.0.4/arch/x86/include/asm/ptrace.h 2011-07-21 22:17:23.000000000 -0400
8484+++ linux-3.0.4/arch/x86/include/asm/ptrace.h 2011-08-23 21:47:55.000000000 -0400
8485@@ -153,28 +153,29 @@ static inline unsigned long regs_return_
8486 }
8487
8488 /*
8489- * user_mode_vm(regs) determines whether a register set came from user mode.
8490+ * user_mode(regs) determines whether a register set came from user mode.
8491 * This is true if V8086 mode was enabled OR if the register set was from
8492 * protected mode with RPL-3 CS value. This tricky test checks that with
8493 * one comparison. Many places in the kernel can bypass this full check
8494- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
8495+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
8496+ * be used.
8497 */
8498-static inline int user_mode(struct pt_regs *regs)
8499+static inline int user_mode_novm(struct pt_regs *regs)
8500 {
8501 #ifdef CONFIG_X86_32
8502 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
8503 #else
8504- return !!(regs->cs & 3);
8505+ return !!(regs->cs & SEGMENT_RPL_MASK);
8506 #endif
8507 }
8508
8509-static inline int user_mode_vm(struct pt_regs *regs)
8510+static inline int user_mode(struct pt_regs *regs)
8511 {
8512 #ifdef CONFIG_X86_32
8513 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
8514 USER_RPL;
8515 #else
8516- return user_mode(regs);
8517+ return user_mode_novm(regs);
8518 #endif
8519 }
8520
8521diff -urNp linux-3.0.4/arch/x86/include/asm/reboot.h linux-3.0.4/arch/x86/include/asm/reboot.h
8522--- linux-3.0.4/arch/x86/include/asm/reboot.h 2011-07-21 22:17:23.000000000 -0400
8523+++ linux-3.0.4/arch/x86/include/asm/reboot.h 2011-08-23 21:47:55.000000000 -0400
8524@@ -6,19 +6,19 @@
8525 struct pt_regs;
8526
8527 struct machine_ops {
8528- void (*restart)(char *cmd);
8529- void (*halt)(void);
8530- void (*power_off)(void);
8531+ void (* __noreturn restart)(char *cmd);
8532+ void (* __noreturn halt)(void);
8533+ void (* __noreturn power_off)(void);
8534 void (*shutdown)(void);
8535 void (*crash_shutdown)(struct pt_regs *);
8536- void (*emergency_restart)(void);
8537-};
8538+ void (* __noreturn emergency_restart)(void);
8539+} __no_const;
8540
8541 extern struct machine_ops machine_ops;
8542
8543 void native_machine_crash_shutdown(struct pt_regs *regs);
8544 void native_machine_shutdown(void);
8545-void machine_real_restart(unsigned int type);
8546+void machine_real_restart(unsigned int type) __noreturn;
8547 /* These must match dispatch_table in reboot_32.S */
8548 #define MRR_BIOS 0
8549 #define MRR_APM 1
8550diff -urNp linux-3.0.4/arch/x86/include/asm/rwsem.h linux-3.0.4/arch/x86/include/asm/rwsem.h
8551--- linux-3.0.4/arch/x86/include/asm/rwsem.h 2011-07-21 22:17:23.000000000 -0400
8552+++ linux-3.0.4/arch/x86/include/asm/rwsem.h 2011-08-23 21:47:55.000000000 -0400
8553@@ -64,6 +64,14 @@ static inline void __down_read(struct rw
8554 {
8555 asm volatile("# beginning down_read\n\t"
8556 LOCK_PREFIX _ASM_INC "(%1)\n\t"
8557+
8558+#ifdef CONFIG_PAX_REFCOUNT
8559+ "jno 0f\n"
8560+ LOCK_PREFIX _ASM_DEC "(%1)\n"
8561+ "int $4\n0:\n"
8562+ _ASM_EXTABLE(0b, 0b)
8563+#endif
8564+
8565 /* adds 0x00000001 */
8566 " jns 1f\n"
8567 " call call_rwsem_down_read_failed\n"
8568@@ -85,6 +93,14 @@ static inline int __down_read_trylock(st
8569 "1:\n\t"
8570 " mov %1,%2\n\t"
8571 " add %3,%2\n\t"
8572+
8573+#ifdef CONFIG_PAX_REFCOUNT
8574+ "jno 0f\n"
8575+ "sub %3,%2\n"
8576+ "int $4\n0:\n"
8577+ _ASM_EXTABLE(0b, 0b)
8578+#endif
8579+
8580 " jle 2f\n\t"
8581 LOCK_PREFIX " cmpxchg %2,%0\n\t"
8582 " jnz 1b\n\t"
8583@@ -104,6 +120,14 @@ static inline void __down_write_nested(s
8584 long tmp;
8585 asm volatile("# beginning down_write\n\t"
8586 LOCK_PREFIX " xadd %1,(%2)\n\t"
8587+
8588+#ifdef CONFIG_PAX_REFCOUNT
8589+ "jno 0f\n"
8590+ "mov %1,(%2)\n"
8591+ "int $4\n0:\n"
8592+ _ASM_EXTABLE(0b, 0b)
8593+#endif
8594+
8595 /* adds 0xffff0001, returns the old value */
8596 " test %1,%1\n\t"
8597 /* was the count 0 before? */
8598@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_s
8599 long tmp;
8600 asm volatile("# beginning __up_read\n\t"
8601 LOCK_PREFIX " xadd %1,(%2)\n\t"
8602+
8603+#ifdef CONFIG_PAX_REFCOUNT
8604+ "jno 0f\n"
8605+ "mov %1,(%2)\n"
8606+ "int $4\n0:\n"
8607+ _ASM_EXTABLE(0b, 0b)
8608+#endif
8609+
8610 /* subtracts 1, returns the old value */
8611 " jns 1f\n\t"
8612 " call call_rwsem_wake\n" /* expects old value in %edx */
8613@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_
8614 long tmp;
8615 asm volatile("# beginning __up_write\n\t"
8616 LOCK_PREFIX " xadd %1,(%2)\n\t"
8617+
8618+#ifdef CONFIG_PAX_REFCOUNT
8619+ "jno 0f\n"
8620+ "mov %1,(%2)\n"
8621+ "int $4\n0:\n"
8622+ _ASM_EXTABLE(0b, 0b)
8623+#endif
8624+
8625 /* subtracts 0xffff0001, returns the old value */
8626 " jns 1f\n\t"
8627 " call call_rwsem_wake\n" /* expects old value in %edx */
8628@@ -176,6 +216,14 @@ static inline void __downgrade_write(str
8629 {
8630 asm volatile("# beginning __downgrade_write\n\t"
8631 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
8632+
8633+#ifdef CONFIG_PAX_REFCOUNT
8634+ "jno 0f\n"
8635+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
8636+ "int $4\n0:\n"
8637+ _ASM_EXTABLE(0b, 0b)
8638+#endif
8639+
8640 /*
8641 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
8642 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
8643@@ -194,7 +242,15 @@ static inline void __downgrade_write(str
8644 */
8645 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
8646 {
8647- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
8648+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
8649+
8650+#ifdef CONFIG_PAX_REFCOUNT
8651+ "jno 0f\n"
8652+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
8653+ "int $4\n0:\n"
8654+ _ASM_EXTABLE(0b, 0b)
8655+#endif
8656+
8657 : "+m" (sem->count)
8658 : "er" (delta));
8659 }
8660@@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(l
8661 {
8662 long tmp = delta;
8663
8664- asm volatile(LOCK_PREFIX "xadd %0,%1"
8665+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
8666+
8667+#ifdef CONFIG_PAX_REFCOUNT
8668+ "jno 0f\n"
8669+ "mov %0,%1\n"
8670+ "int $4\n0:\n"
8671+ _ASM_EXTABLE(0b, 0b)
8672+#endif
8673+
8674 : "+r" (tmp), "+m" (sem->count)
8675 : : "memory");
8676
8677diff -urNp linux-3.0.4/arch/x86/include/asm/segment.h linux-3.0.4/arch/x86/include/asm/segment.h
8678--- linux-3.0.4/arch/x86/include/asm/segment.h 2011-07-21 22:17:23.000000000 -0400
8679+++ linux-3.0.4/arch/x86/include/asm/segment.h 2011-09-17 00:53:42.000000000 -0400
8680@@ -64,10 +64,15 @@
8681 * 26 - ESPFIX small SS
8682 * 27 - per-cpu [ offset to per-cpu data area ]
8683 * 28 - stack_canary-20 [ for stack protector ]
8684- * 29 - unused
8685- * 30 - unused
8686+ * 29 - PCI BIOS CS
8687+ * 30 - PCI BIOS DS
8688 * 31 - TSS for double fault handler
8689 */
8690+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
8691+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
8692+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
8693+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
8694+
8695 #define GDT_ENTRY_TLS_MIN 6
8696 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
8697
8698@@ -79,6 +84,8 @@
8699
8700 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
8701
8702+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
8703+
8704 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
8705
8706 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
8707@@ -104,6 +111,12 @@
8708 #define __KERNEL_STACK_CANARY 0
8709 #endif
8710
8711+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
8712+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
8713+
8714+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
8715+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
8716+
8717 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
8718
8719 /*
8720@@ -141,7 +154,7 @@
8721 */
8722
8723 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
8724-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
8725+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
8726
8727
8728 #else
8729@@ -165,6 +178,8 @@
8730 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
8731 #define __USER32_DS __USER_DS
8732
8733+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
8734+
8735 #define GDT_ENTRY_TSS 8 /* needs two entries */
8736 #define GDT_ENTRY_LDT 10 /* needs two entries */
8737 #define GDT_ENTRY_TLS_MIN 12
8738@@ -185,6 +200,7 @@
8739 #endif
8740
8741 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
8742+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
8743 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
8744 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
8745 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
8746diff -urNp linux-3.0.4/arch/x86/include/asm/smp.h linux-3.0.4/arch/x86/include/asm/smp.h
8747--- linux-3.0.4/arch/x86/include/asm/smp.h 2011-07-21 22:17:23.000000000 -0400
8748+++ linux-3.0.4/arch/x86/include/asm/smp.h 2011-08-23 21:47:55.000000000 -0400
8749@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_
8750 /* cpus sharing the last level cache: */
8751 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
8752 DECLARE_PER_CPU(u16, cpu_llc_id);
8753-DECLARE_PER_CPU(int, cpu_number);
8754+DECLARE_PER_CPU(unsigned int, cpu_number);
8755
8756 static inline struct cpumask *cpu_sibling_mask(int cpu)
8757 {
8758@@ -77,7 +77,7 @@ struct smp_ops {
8759
8760 void (*send_call_func_ipi)(const struct cpumask *mask);
8761 void (*send_call_func_single_ipi)(int cpu);
8762-};
8763+} __no_const;
8764
8765 /* Globals due to paravirt */
8766 extern void set_cpu_sibling_map(int cpu);
8767@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitd
8768 extern int safe_smp_processor_id(void);
8769
8770 #elif defined(CONFIG_X86_64_SMP)
8771-#define raw_smp_processor_id() (percpu_read(cpu_number))
8772-
8773-#define stack_smp_processor_id() \
8774-({ \
8775- struct thread_info *ti; \
8776- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
8777- ti->cpu; \
8778-})
8779+#define raw_smp_processor_id() (percpu_read(cpu_number))
8780+#define stack_smp_processor_id() raw_smp_processor_id()
8781 #define safe_smp_processor_id() smp_processor_id()
8782
8783 #endif
8784diff -urNp linux-3.0.4/arch/x86/include/asm/spinlock.h linux-3.0.4/arch/x86/include/asm/spinlock.h
8785--- linux-3.0.4/arch/x86/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
8786+++ linux-3.0.4/arch/x86/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
8787@@ -249,6 +249,14 @@ static inline int arch_write_can_lock(ar
8788 static inline void arch_read_lock(arch_rwlock_t *rw)
8789 {
8790 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
8791+
8792+#ifdef CONFIG_PAX_REFCOUNT
8793+ "jno 0f\n"
8794+ LOCK_PREFIX " addl $1,(%0)\n"
8795+ "int $4\n0:\n"
8796+ _ASM_EXTABLE(0b, 0b)
8797+#endif
8798+
8799 "jns 1f\n"
8800 "call __read_lock_failed\n\t"
8801 "1:\n"
8802@@ -258,6 +266,14 @@ static inline void arch_read_lock(arch_r
8803 static inline void arch_write_lock(arch_rwlock_t *rw)
8804 {
8805 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
8806+
8807+#ifdef CONFIG_PAX_REFCOUNT
8808+ "jno 0f\n"
8809+ LOCK_PREFIX " addl %1,(%0)\n"
8810+ "int $4\n0:\n"
8811+ _ASM_EXTABLE(0b, 0b)
8812+#endif
8813+
8814 "jz 1f\n"
8815 "call __write_lock_failed\n\t"
8816 "1:\n"
8817@@ -286,12 +302,29 @@ static inline int arch_write_trylock(arc
8818
8819 static inline void arch_read_unlock(arch_rwlock_t *rw)
8820 {
8821- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
8822+ asm volatile(LOCK_PREFIX "incl %0\n"
8823+
8824+#ifdef CONFIG_PAX_REFCOUNT
8825+ "jno 0f\n"
8826+ LOCK_PREFIX "decl %0\n"
8827+ "int $4\n0:\n"
8828+ _ASM_EXTABLE(0b, 0b)
8829+#endif
8830+
8831+ :"+m" (rw->lock) : : "memory");
8832 }
8833
8834 static inline void arch_write_unlock(arch_rwlock_t *rw)
8835 {
8836- asm volatile(LOCK_PREFIX "addl %1, %0"
8837+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
8838+
8839+#ifdef CONFIG_PAX_REFCOUNT
8840+ "jno 0f\n"
8841+ LOCK_PREFIX "subl %1, %0\n"
8842+ "int $4\n0:\n"
8843+ _ASM_EXTABLE(0b, 0b)
8844+#endif
8845+
8846 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
8847 }
8848
8849diff -urNp linux-3.0.4/arch/x86/include/asm/stackprotector.h linux-3.0.4/arch/x86/include/asm/stackprotector.h
8850--- linux-3.0.4/arch/x86/include/asm/stackprotector.h 2011-07-21 22:17:23.000000000 -0400
8851+++ linux-3.0.4/arch/x86/include/asm/stackprotector.h 2011-08-23 21:47:55.000000000 -0400
8852@@ -48,7 +48,7 @@
8853 * head_32 for boot CPU and setup_per_cpu_areas() for others.
8854 */
8855 #define GDT_STACK_CANARY_INIT \
8856- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
8857+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
8858
8859 /*
8860 * Initialize the stackprotector canary value.
8861@@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
8862
8863 static inline void load_stack_canary_segment(void)
8864 {
8865-#ifdef CONFIG_X86_32
8866+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
8867 asm volatile ("mov %0, %%gs" : : "r" (0));
8868 #endif
8869 }
8870diff -urNp linux-3.0.4/arch/x86/include/asm/stacktrace.h linux-3.0.4/arch/x86/include/asm/stacktrace.h
8871--- linux-3.0.4/arch/x86/include/asm/stacktrace.h 2011-07-21 22:17:23.000000000 -0400
8872+++ linux-3.0.4/arch/x86/include/asm/stacktrace.h 2011-08-23 21:47:55.000000000 -0400
8873@@ -11,28 +11,20 @@
8874
8875 extern int kstack_depth_to_print;
8876
8877-struct thread_info;
8878+struct task_struct;
8879 struct stacktrace_ops;
8880
8881-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
8882- unsigned long *stack,
8883- unsigned long bp,
8884- const struct stacktrace_ops *ops,
8885- void *data,
8886- unsigned long *end,
8887- int *graph);
8888-
8889-extern unsigned long
8890-print_context_stack(struct thread_info *tinfo,
8891- unsigned long *stack, unsigned long bp,
8892- const struct stacktrace_ops *ops, void *data,
8893- unsigned long *end, int *graph);
8894-
8895-extern unsigned long
8896-print_context_stack_bp(struct thread_info *tinfo,
8897- unsigned long *stack, unsigned long bp,
8898- const struct stacktrace_ops *ops, void *data,
8899- unsigned long *end, int *graph);
8900+typedef unsigned long walk_stack_t(struct task_struct *task,
8901+ void *stack_start,
8902+ unsigned long *stack,
8903+ unsigned long bp,
8904+ const struct stacktrace_ops *ops,
8905+ void *data,
8906+ unsigned long *end,
8907+ int *graph);
8908+
8909+extern walk_stack_t print_context_stack;
8910+extern walk_stack_t print_context_stack_bp;
8911
8912 /* Generic stack tracer with callbacks */
8913
8914@@ -40,7 +32,7 @@ struct stacktrace_ops {
8915 void (*address)(void *data, unsigned long address, int reliable);
8916 /* On negative return stop dumping */
8917 int (*stack)(void *data, char *name);
8918- walk_stack_t walk_stack;
8919+ walk_stack_t *walk_stack;
8920 };
8921
8922 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
8923diff -urNp linux-3.0.4/arch/x86/include/asm/system.h linux-3.0.4/arch/x86/include/asm/system.h
8924--- linux-3.0.4/arch/x86/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
8925+++ linux-3.0.4/arch/x86/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
8926@@ -129,7 +129,7 @@ do { \
8927 "call __switch_to\n\t" \
8928 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
8929 __switch_canary \
8930- "movq %P[thread_info](%%rsi),%%r8\n\t" \
8931+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
8932 "movq %%rax,%%rdi\n\t" \
8933 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
8934 "jnz ret_from_fork\n\t" \
8935@@ -140,7 +140,7 @@ do { \
8936 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
8937 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
8938 [_tif_fork] "i" (_TIF_FORK), \
8939- [thread_info] "i" (offsetof(struct task_struct, stack)), \
8940+ [thread_info] "m" (current_tinfo), \
8941 [current_task] "m" (current_task) \
8942 __switch_canary_iparam \
8943 : "memory", "cc" __EXTRA_CLOBBER)
8944@@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
8945 {
8946 unsigned long __limit;
8947 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
8948- return __limit + 1;
8949+ return __limit;
8950 }
8951
8952 static inline void native_clts(void)
8953@@ -397,12 +397,12 @@ void enable_hlt(void);
8954
8955 void cpu_idle_wait(void);
8956
8957-extern unsigned long arch_align_stack(unsigned long sp);
8958+#define arch_align_stack(x) ((x) & ~0xfUL)
8959 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
8960
8961 void default_idle(void);
8962
8963-void stop_this_cpu(void *dummy);
8964+void stop_this_cpu(void *dummy) __noreturn;
8965
8966 /*
8967 * Force strict CPU ordering.
8968diff -urNp linux-3.0.4/arch/x86/include/asm/thread_info.h linux-3.0.4/arch/x86/include/asm/thread_info.h
8969--- linux-3.0.4/arch/x86/include/asm/thread_info.h 2011-07-21 22:17:23.000000000 -0400
8970+++ linux-3.0.4/arch/x86/include/asm/thread_info.h 2011-08-23 21:47:55.000000000 -0400
8971@@ -10,6 +10,7 @@
8972 #include <linux/compiler.h>
8973 #include <asm/page.h>
8974 #include <asm/types.h>
8975+#include <asm/percpu.h>
8976
8977 /*
8978 * low level task data that entry.S needs immediate access to
8979@@ -24,7 +25,6 @@ struct exec_domain;
8980 #include <asm/atomic.h>
8981
8982 struct thread_info {
8983- struct task_struct *task; /* main task structure */
8984 struct exec_domain *exec_domain; /* execution domain */
8985 __u32 flags; /* low level flags */
8986 __u32 status; /* thread synchronous flags */
8987@@ -34,18 +34,12 @@ struct thread_info {
8988 mm_segment_t addr_limit;
8989 struct restart_block restart_block;
8990 void __user *sysenter_return;
8991-#ifdef CONFIG_X86_32
8992- unsigned long previous_esp; /* ESP of the previous stack in
8993- case of nested (IRQ) stacks
8994- */
8995- __u8 supervisor_stack[0];
8996-#endif
8997+ unsigned long lowest_stack;
8998 int uaccess_err;
8999 };
9000
9001-#define INIT_THREAD_INFO(tsk) \
9002+#define INIT_THREAD_INFO \
9003 { \
9004- .task = &tsk, \
9005 .exec_domain = &default_exec_domain, \
9006 .flags = 0, \
9007 .cpu = 0, \
9008@@ -56,7 +50,7 @@ struct thread_info {
9009 }, \
9010 }
9011
9012-#define init_thread_info (init_thread_union.thread_info)
9013+#define init_thread_info (init_thread_union.stack)
9014 #define init_stack (init_thread_union.stack)
9015
9016 #else /* !__ASSEMBLY__ */
9017@@ -170,6 +164,23 @@ struct thread_info {
9018 ret; \
9019 })
9020
9021+#ifdef __ASSEMBLY__
9022+/* how to get the thread information struct from ASM */
9023+#define GET_THREAD_INFO(reg) \
9024+ mov PER_CPU_VAR(current_tinfo), reg
9025+
9026+/* use this one if reg already contains %esp */
9027+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
9028+#else
9029+/* how to get the thread information struct from C */
9030+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
9031+
9032+static __always_inline struct thread_info *current_thread_info(void)
9033+{
9034+ return percpu_read_stable(current_tinfo);
9035+}
9036+#endif
9037+
9038 #ifdef CONFIG_X86_32
9039
9040 #define STACK_WARN (THREAD_SIZE/8)
9041@@ -180,35 +191,13 @@ struct thread_info {
9042 */
9043 #ifndef __ASSEMBLY__
9044
9045-
9046 /* how to get the current stack pointer from C */
9047 register unsigned long current_stack_pointer asm("esp") __used;
9048
9049-/* how to get the thread information struct from C */
9050-static inline struct thread_info *current_thread_info(void)
9051-{
9052- return (struct thread_info *)
9053- (current_stack_pointer & ~(THREAD_SIZE - 1));
9054-}
9055-
9056-#else /* !__ASSEMBLY__ */
9057-
9058-/* how to get the thread information struct from ASM */
9059-#define GET_THREAD_INFO(reg) \
9060- movl $-THREAD_SIZE, reg; \
9061- andl %esp, reg
9062-
9063-/* use this one if reg already contains %esp */
9064-#define GET_THREAD_INFO_WITH_ESP(reg) \
9065- andl $-THREAD_SIZE, reg
9066-
9067 #endif
9068
9069 #else /* X86_32 */
9070
9071-#include <asm/percpu.h>
9072-#define KERNEL_STACK_OFFSET (5*8)
9073-
9074 /*
9075 * macros/functions for gaining access to the thread information structure
9076 * preempt_count needs to be 1 initially, until the scheduler is functional.
9077@@ -216,21 +205,8 @@ static inline struct thread_info *curren
9078 #ifndef __ASSEMBLY__
9079 DECLARE_PER_CPU(unsigned long, kernel_stack);
9080
9081-static inline struct thread_info *current_thread_info(void)
9082-{
9083- struct thread_info *ti;
9084- ti = (void *)(percpu_read_stable(kernel_stack) +
9085- KERNEL_STACK_OFFSET - THREAD_SIZE);
9086- return ti;
9087-}
9088-
9089-#else /* !__ASSEMBLY__ */
9090-
9091-/* how to get the thread information struct from ASM */
9092-#define GET_THREAD_INFO(reg) \
9093- movq PER_CPU_VAR(kernel_stack),reg ; \
9094- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
9095-
9096+/* how to get the current stack pointer from C */
9097+register unsigned long current_stack_pointer asm("rsp") __used;
9098 #endif
9099
9100 #endif /* !X86_32 */
9101@@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
9102 extern void free_thread_info(struct thread_info *ti);
9103 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
9104 #define arch_task_cache_init arch_task_cache_init
9105+
9106+#define __HAVE_THREAD_FUNCTIONS
9107+#define task_thread_info(task) (&(task)->tinfo)
9108+#define task_stack_page(task) ((task)->stack)
9109+#define setup_thread_stack(p, org) do {} while (0)
9110+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
9111+
9112+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
9113+extern struct task_struct *alloc_task_struct_node(int node);
9114+extern void free_task_struct(struct task_struct *);
9115+
9116 #endif
9117 #endif /* _ASM_X86_THREAD_INFO_H */
9118diff -urNp linux-3.0.4/arch/x86/include/asm/uaccess_32.h linux-3.0.4/arch/x86/include/asm/uaccess_32.h
9119--- linux-3.0.4/arch/x86/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
9120+++ linux-3.0.4/arch/x86/include/asm/uaccess_32.h 2011-08-23 21:48:14.000000000 -0400
9121@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_u
9122 static __always_inline unsigned long __must_check
9123 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
9124 {
9125+ pax_track_stack();
9126+
9127+ if ((long)n < 0)
9128+ return n;
9129+
9130 if (__builtin_constant_p(n)) {
9131 unsigned long ret;
9132
9133@@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to,
9134 return ret;
9135 }
9136 }
9137+ if (!__builtin_constant_p(n))
9138+ check_object_size(from, n, true);
9139 return __copy_to_user_ll(to, from, n);
9140 }
9141
9142@@ -82,12 +89,16 @@ static __always_inline unsigned long __m
9143 __copy_to_user(void __user *to, const void *from, unsigned long n)
9144 {
9145 might_fault();
9146+
9147 return __copy_to_user_inatomic(to, from, n);
9148 }
9149
9150 static __always_inline unsigned long
9151 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
9152 {
9153+ if ((long)n < 0)
9154+ return n;
9155+
9156 /* Avoid zeroing the tail if the copy fails..
9157 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
9158 * but as the zeroing behaviour is only significant when n is not
9159@@ -137,6 +148,12 @@ static __always_inline unsigned long
9160 __copy_from_user(void *to, const void __user *from, unsigned long n)
9161 {
9162 might_fault();
9163+
9164+ pax_track_stack();
9165+
9166+ if ((long)n < 0)
9167+ return n;
9168+
9169 if (__builtin_constant_p(n)) {
9170 unsigned long ret;
9171
9172@@ -152,6 +169,8 @@ __copy_from_user(void *to, const void __
9173 return ret;
9174 }
9175 }
9176+ if (!__builtin_constant_p(n))
9177+ check_object_size(to, n, false);
9178 return __copy_from_user_ll(to, from, n);
9179 }
9180
9181@@ -159,6 +178,10 @@ static __always_inline unsigned long __c
9182 const void __user *from, unsigned long n)
9183 {
9184 might_fault();
9185+
9186+ if ((long)n < 0)
9187+ return n;
9188+
9189 if (__builtin_constant_p(n)) {
9190 unsigned long ret;
9191
9192@@ -181,15 +204,19 @@ static __always_inline unsigned long
9193 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
9194 unsigned long n)
9195 {
9196- return __copy_from_user_ll_nocache_nozero(to, from, n);
9197-}
9198+ if ((long)n < 0)
9199+ return n;
9200
9201-unsigned long __must_check copy_to_user(void __user *to,
9202- const void *from, unsigned long n);
9203-unsigned long __must_check _copy_from_user(void *to,
9204- const void __user *from,
9205- unsigned long n);
9206+ return __copy_from_user_ll_nocache_nozero(to, from, n);
9207+}
9208
9209+extern void copy_to_user_overflow(void)
9210+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9211+ __compiletime_error("copy_to_user() buffer size is not provably correct")
9212+#else
9213+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
9214+#endif
9215+;
9216
9217 extern void copy_from_user_overflow(void)
9218 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9219@@ -199,17 +226,61 @@ extern void copy_from_user_overflow(void
9220 #endif
9221 ;
9222
9223-static inline unsigned long __must_check copy_from_user(void *to,
9224- const void __user *from,
9225- unsigned long n)
9226+/**
9227+ * copy_to_user: - Copy a block of data into user space.
9228+ * @to: Destination address, in user space.
9229+ * @from: Source address, in kernel space.
9230+ * @n: Number of bytes to copy.
9231+ *
9232+ * Context: User context only. This function may sleep.
9233+ *
9234+ * Copy data from kernel space to user space.
9235+ *
9236+ * Returns number of bytes that could not be copied.
9237+ * On success, this will be zero.
9238+ */
9239+static inline unsigned long __must_check
9240+copy_to_user(void __user *to, const void *from, unsigned long n)
9241+{
9242+ int sz = __compiletime_object_size(from);
9243+
9244+ if (unlikely(sz != -1 && sz < n))
9245+ copy_to_user_overflow();
9246+ else if (access_ok(VERIFY_WRITE, to, n))
9247+ n = __copy_to_user(to, from, n);
9248+ return n;
9249+}
9250+
9251+/**
9252+ * copy_from_user: - Copy a block of data from user space.
9253+ * @to: Destination address, in kernel space.
9254+ * @from: Source address, in user space.
9255+ * @n: Number of bytes to copy.
9256+ *
9257+ * Context: User context only. This function may sleep.
9258+ *
9259+ * Copy data from user space to kernel space.
9260+ *
9261+ * Returns number of bytes that could not be copied.
9262+ * On success, this will be zero.
9263+ *
9264+ * If some data could not be copied, this function will pad the copied
9265+ * data to the requested size using zero bytes.
9266+ */
9267+static inline unsigned long __must_check
9268+copy_from_user(void *to, const void __user *from, unsigned long n)
9269 {
9270 int sz = __compiletime_object_size(to);
9271
9272- if (likely(sz == -1 || sz >= n))
9273- n = _copy_from_user(to, from, n);
9274- else
9275+ if (unlikely(sz != -1 && sz < n))
9276 copy_from_user_overflow();
9277-
9278+ else if (access_ok(VERIFY_READ, from, n))
9279+ n = __copy_from_user(to, from, n);
9280+ else if ((long)n > 0) {
9281+ if (!__builtin_constant_p(n))
9282+ check_object_size(to, n, false);
9283+ memset(to, 0, n);
9284+ }
9285 return n;
9286 }
9287
9288diff -urNp linux-3.0.4/arch/x86/include/asm/uaccess_64.h linux-3.0.4/arch/x86/include/asm/uaccess_64.h
9289--- linux-3.0.4/arch/x86/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
9290+++ linux-3.0.4/arch/x86/include/asm/uaccess_64.h 2011-08-23 21:48:14.000000000 -0400
9291@@ -10,6 +10,9 @@
9292 #include <asm/alternative.h>
9293 #include <asm/cpufeature.h>
9294 #include <asm/page.h>
9295+#include <asm/pgtable.h>
9296+
9297+#define set_fs(x) (current_thread_info()->addr_limit = (x))
9298
9299 /*
9300 * Copy To/From Userspace
9301@@ -36,26 +39,26 @@ copy_user_generic(void *to, const void *
9302 return ret;
9303 }
9304
9305-__must_check unsigned long
9306-_copy_to_user(void __user *to, const void *from, unsigned len);
9307-__must_check unsigned long
9308-_copy_from_user(void *to, const void __user *from, unsigned len);
9309+static __always_inline __must_check unsigned long
9310+__copy_to_user(void __user *to, const void *from, unsigned len);
9311+static __always_inline __must_check unsigned long
9312+__copy_from_user(void *to, const void __user *from, unsigned len);
9313 __must_check unsigned long
9314 copy_in_user(void __user *to, const void __user *from, unsigned len);
9315
9316 static inline unsigned long __must_check copy_from_user(void *to,
9317 const void __user *from,
9318- unsigned long n)
9319+ unsigned n)
9320 {
9321- int sz = __compiletime_object_size(to);
9322-
9323 might_fault();
9324- if (likely(sz == -1 || sz >= n))
9325- n = _copy_from_user(to, from, n);
9326-#ifdef CONFIG_DEBUG_VM
9327- else
9328- WARN(1, "Buffer overflow detected!\n");
9329-#endif
9330+
9331+ if (access_ok(VERIFY_READ, from, n))
9332+ n = __copy_from_user(to, from, n);
9333+ else if ((int)n > 0) {
9334+ if (!__builtin_constant_p(n))
9335+ check_object_size(to, n, false);
9336+ memset(to, 0, n);
9337+ }
9338 return n;
9339 }
9340
9341@@ -64,110 +67,198 @@ int copy_to_user(void __user *dst, const
9342 {
9343 might_fault();
9344
9345- return _copy_to_user(dst, src, size);
9346+ if (access_ok(VERIFY_WRITE, dst, size))
9347+ size = __copy_to_user(dst, src, size);
9348+ return size;
9349 }
9350
9351 static __always_inline __must_check
9352-int __copy_from_user(void *dst, const void __user *src, unsigned size)
9353+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
9354 {
9355- int ret = 0;
9356+ int sz = __compiletime_object_size(dst);
9357+ unsigned ret = 0;
9358
9359 might_fault();
9360- if (!__builtin_constant_p(size))
9361- return copy_user_generic(dst, (__force void *)src, size);
9362+
9363+ pax_track_stack();
9364+
9365+ if ((int)size < 0)
9366+ return size;
9367+
9368+#ifdef CONFIG_PAX_MEMORY_UDEREF
9369+ if (!__access_ok(VERIFY_READ, src, size))
9370+ return size;
9371+#endif
9372+
9373+ if (unlikely(sz != -1 && sz < size)) {
9374+#ifdef CONFIG_DEBUG_VM
9375+ WARN(1, "Buffer overflow detected!\n");
9376+#endif
9377+ return size;
9378+ }
9379+
9380+ if (!__builtin_constant_p(size)) {
9381+ check_object_size(dst, size, false);
9382+
9383+#ifdef CONFIG_PAX_MEMORY_UDEREF
9384+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9385+ src += PAX_USER_SHADOW_BASE;
9386+#endif
9387+
9388+ return copy_user_generic(dst, (__force const void *)src, size);
9389+ }
9390 switch (size) {
9391- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
9392+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
9393 ret, "b", "b", "=q", 1);
9394 return ret;
9395- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
9396+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
9397 ret, "w", "w", "=r", 2);
9398 return ret;
9399- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
9400+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
9401 ret, "l", "k", "=r", 4);
9402 return ret;
9403- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
9404+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9405 ret, "q", "", "=r", 8);
9406 return ret;
9407 case 10:
9408- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9409+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9410 ret, "q", "", "=r", 10);
9411 if (unlikely(ret))
9412 return ret;
9413 __get_user_asm(*(u16 *)(8 + (char *)dst),
9414- (u16 __user *)(8 + (char __user *)src),
9415+ (const u16 __user *)(8 + (const char __user *)src),
9416 ret, "w", "w", "=r", 2);
9417 return ret;
9418 case 16:
9419- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9420+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9421 ret, "q", "", "=r", 16);
9422 if (unlikely(ret))
9423 return ret;
9424 __get_user_asm(*(u64 *)(8 + (char *)dst),
9425- (u64 __user *)(8 + (char __user *)src),
9426+ (const u64 __user *)(8 + (const char __user *)src),
9427 ret, "q", "", "=r", 8);
9428 return ret;
9429 default:
9430- return copy_user_generic(dst, (__force void *)src, size);
9431+
9432+#ifdef CONFIG_PAX_MEMORY_UDEREF
9433+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9434+ src += PAX_USER_SHADOW_BASE;
9435+#endif
9436+
9437+ return copy_user_generic(dst, (__force const void *)src, size);
9438 }
9439 }
9440
9441 static __always_inline __must_check
9442-int __copy_to_user(void __user *dst, const void *src, unsigned size)
9443+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
9444 {
9445- int ret = 0;
9446+ int sz = __compiletime_object_size(src);
9447+ unsigned ret = 0;
9448
9449 might_fault();
9450- if (!__builtin_constant_p(size))
9451+
9452+ pax_track_stack();
9453+
9454+ if ((int)size < 0)
9455+ return size;
9456+
9457+#ifdef CONFIG_PAX_MEMORY_UDEREF
9458+ if (!__access_ok(VERIFY_WRITE, dst, size))
9459+ return size;
9460+#endif
9461+
9462+ if (unlikely(sz != -1 && sz < size)) {
9463+#ifdef CONFIG_DEBUG_VM
9464+ WARN(1, "Buffer overflow detected!\n");
9465+#endif
9466+ return size;
9467+ }
9468+
9469+ if (!__builtin_constant_p(size)) {
9470+ check_object_size(src, size, true);
9471+
9472+#ifdef CONFIG_PAX_MEMORY_UDEREF
9473+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9474+ dst += PAX_USER_SHADOW_BASE;
9475+#endif
9476+
9477 return copy_user_generic((__force void *)dst, src, size);
9478+ }
9479 switch (size) {
9480- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
9481+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
9482 ret, "b", "b", "iq", 1);
9483 return ret;
9484- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
9485+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
9486 ret, "w", "w", "ir", 2);
9487 return ret;
9488- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
9489+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
9490 ret, "l", "k", "ir", 4);
9491 return ret;
9492- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
9493+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9494 ret, "q", "", "er", 8);
9495 return ret;
9496 case 10:
9497- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9498+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9499 ret, "q", "", "er", 10);
9500 if (unlikely(ret))
9501 return ret;
9502 asm("":::"memory");
9503- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
9504+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
9505 ret, "w", "w", "ir", 2);
9506 return ret;
9507 case 16:
9508- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9509+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9510 ret, "q", "", "er", 16);
9511 if (unlikely(ret))
9512 return ret;
9513 asm("":::"memory");
9514- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
9515+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
9516 ret, "q", "", "er", 8);
9517 return ret;
9518 default:
9519+
9520+#ifdef CONFIG_PAX_MEMORY_UDEREF
9521+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9522+ dst += PAX_USER_SHADOW_BASE;
9523+#endif
9524+
9525 return copy_user_generic((__force void *)dst, src, size);
9526 }
9527 }
9528
9529 static __always_inline __must_check
9530-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9531+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9532 {
9533- int ret = 0;
9534+ unsigned ret = 0;
9535
9536 might_fault();
9537- if (!__builtin_constant_p(size))
9538+
9539+ if ((int)size < 0)
9540+ return size;
9541+
9542+#ifdef CONFIG_PAX_MEMORY_UDEREF
9543+ if (!__access_ok(VERIFY_READ, src, size))
9544+ return size;
9545+ if (!__access_ok(VERIFY_WRITE, dst, size))
9546+ return size;
9547+#endif
9548+
9549+ if (!__builtin_constant_p(size)) {
9550+
9551+#ifdef CONFIG_PAX_MEMORY_UDEREF
9552+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9553+ src += PAX_USER_SHADOW_BASE;
9554+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9555+ dst += PAX_USER_SHADOW_BASE;
9556+#endif
9557+
9558 return copy_user_generic((__force void *)dst,
9559- (__force void *)src, size);
9560+ (__force const void *)src, size);
9561+ }
9562 switch (size) {
9563 case 1: {
9564 u8 tmp;
9565- __get_user_asm(tmp, (u8 __user *)src,
9566+ __get_user_asm(tmp, (const u8 __user *)src,
9567 ret, "b", "b", "=q", 1);
9568 if (likely(!ret))
9569 __put_user_asm(tmp, (u8 __user *)dst,
9570@@ -176,7 +267,7 @@ int __copy_in_user(void __user *dst, con
9571 }
9572 case 2: {
9573 u16 tmp;
9574- __get_user_asm(tmp, (u16 __user *)src,
9575+ __get_user_asm(tmp, (const u16 __user *)src,
9576 ret, "w", "w", "=r", 2);
9577 if (likely(!ret))
9578 __put_user_asm(tmp, (u16 __user *)dst,
9579@@ -186,7 +277,7 @@ int __copy_in_user(void __user *dst, con
9580
9581 case 4: {
9582 u32 tmp;
9583- __get_user_asm(tmp, (u32 __user *)src,
9584+ __get_user_asm(tmp, (const u32 __user *)src,
9585 ret, "l", "k", "=r", 4);
9586 if (likely(!ret))
9587 __put_user_asm(tmp, (u32 __user *)dst,
9588@@ -195,7 +286,7 @@ int __copy_in_user(void __user *dst, con
9589 }
9590 case 8: {
9591 u64 tmp;
9592- __get_user_asm(tmp, (u64 __user *)src,
9593+ __get_user_asm(tmp, (const u64 __user *)src,
9594 ret, "q", "", "=r", 8);
9595 if (likely(!ret))
9596 __put_user_asm(tmp, (u64 __user *)dst,
9597@@ -203,8 +294,16 @@ int __copy_in_user(void __user *dst, con
9598 return ret;
9599 }
9600 default:
9601+
9602+#ifdef CONFIG_PAX_MEMORY_UDEREF
9603+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9604+ src += PAX_USER_SHADOW_BASE;
9605+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9606+ dst += PAX_USER_SHADOW_BASE;
9607+#endif
9608+
9609 return copy_user_generic((__force void *)dst,
9610- (__force void *)src, size);
9611+ (__force const void *)src, size);
9612 }
9613 }
9614
9615@@ -221,33 +320,72 @@ __must_check unsigned long __clear_user(
9616 static __must_check __always_inline int
9617 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
9618 {
9619+ pax_track_stack();
9620+
9621+ if ((int)size < 0)
9622+ return size;
9623+
9624+#ifdef CONFIG_PAX_MEMORY_UDEREF
9625+ if (!__access_ok(VERIFY_READ, src, size))
9626+ return size;
9627+
9628+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9629+ src += PAX_USER_SHADOW_BASE;
9630+#endif
9631+
9632 return copy_user_generic(dst, (__force const void *)src, size);
9633 }
9634
9635-static __must_check __always_inline int
9636+static __must_check __always_inline unsigned long
9637 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
9638 {
9639+ if ((int)size < 0)
9640+ return size;
9641+
9642+#ifdef CONFIG_PAX_MEMORY_UDEREF
9643+ if (!__access_ok(VERIFY_WRITE, dst, size))
9644+ return size;
9645+
9646+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9647+ dst += PAX_USER_SHADOW_BASE;
9648+#endif
9649+
9650 return copy_user_generic((__force void *)dst, src, size);
9651 }
9652
9653-extern long __copy_user_nocache(void *dst, const void __user *src,
9654+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
9655 unsigned size, int zerorest);
9656
9657-static inline int
9658-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9659+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9660 {
9661 might_sleep();
9662+
9663+ if ((int)size < 0)
9664+ return size;
9665+
9666+#ifdef CONFIG_PAX_MEMORY_UDEREF
9667+ if (!__access_ok(VERIFY_READ, src, size))
9668+ return size;
9669+#endif
9670+
9671 return __copy_user_nocache(dst, src, size, 1);
9672 }
9673
9674-static inline int
9675-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9676+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9677 unsigned size)
9678 {
9679+ if ((int)size < 0)
9680+ return size;
9681+
9682+#ifdef CONFIG_PAX_MEMORY_UDEREF
9683+ if (!__access_ok(VERIFY_READ, src, size))
9684+ return size;
9685+#endif
9686+
9687 return __copy_user_nocache(dst, src, size, 0);
9688 }
9689
9690-unsigned long
9691+extern unsigned long
9692 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
9693
9694 #endif /* _ASM_X86_UACCESS_64_H */
9695diff -urNp linux-3.0.4/arch/x86/include/asm/uaccess.h linux-3.0.4/arch/x86/include/asm/uaccess.h
9696--- linux-3.0.4/arch/x86/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
9697+++ linux-3.0.4/arch/x86/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
9698@@ -7,12 +7,15 @@
9699 #include <linux/compiler.h>
9700 #include <linux/thread_info.h>
9701 #include <linux/string.h>
9702+#include <linux/sched.h>
9703 #include <asm/asm.h>
9704 #include <asm/page.h>
9705
9706 #define VERIFY_READ 0
9707 #define VERIFY_WRITE 1
9708
9709+extern void check_object_size(const void *ptr, unsigned long n, bool to);
9710+
9711 /*
9712 * The fs value determines whether argument validity checking should be
9713 * performed or not. If get_fs() == USER_DS, checking is performed, with
9714@@ -28,7 +31,12 @@
9715
9716 #define get_ds() (KERNEL_DS)
9717 #define get_fs() (current_thread_info()->addr_limit)
9718+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9719+void __set_fs(mm_segment_t x);
9720+void set_fs(mm_segment_t x);
9721+#else
9722 #define set_fs(x) (current_thread_info()->addr_limit = (x))
9723+#endif
9724
9725 #define segment_eq(a, b) ((a).seg == (b).seg)
9726
9727@@ -76,7 +84,33 @@
9728 * checks that the pointer is in the user space range - after calling
9729 * this function, memory access functions may still return -EFAULT.
9730 */
9731-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9732+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9733+#define access_ok(type, addr, size) \
9734+({ \
9735+ long __size = size; \
9736+ unsigned long __addr = (unsigned long)addr; \
9737+ unsigned long __addr_ao = __addr & PAGE_MASK; \
9738+ unsigned long __end_ao = __addr + __size - 1; \
9739+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
9740+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
9741+ while(__addr_ao <= __end_ao) { \
9742+ char __c_ao; \
9743+ __addr_ao += PAGE_SIZE; \
9744+ if (__size > PAGE_SIZE) \
9745+ cond_resched(); \
9746+ if (__get_user(__c_ao, (char __user *)__addr)) \
9747+ break; \
9748+ if (type != VERIFY_WRITE) { \
9749+ __addr = __addr_ao; \
9750+ continue; \
9751+ } \
9752+ if (__put_user(__c_ao, (char __user *)__addr)) \
9753+ break; \
9754+ __addr = __addr_ao; \
9755+ } \
9756+ } \
9757+ __ret_ao; \
9758+})
9759
9760 /*
9761 * The exception table consists of pairs of addresses: the first is the
9762@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
9763 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
9764 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
9765
9766-
9767+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9768+#define __copyuser_seg "gs;"
9769+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
9770+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
9771+#else
9772+#define __copyuser_seg
9773+#define __COPYUSER_SET_ES
9774+#define __COPYUSER_RESTORE_ES
9775+#endif
9776
9777 #ifdef CONFIG_X86_32
9778 #define __put_user_asm_u64(x, addr, err, errret) \
9779- asm volatile("1: movl %%eax,0(%2)\n" \
9780- "2: movl %%edx,4(%2)\n" \
9781+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
9782+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
9783 "3:\n" \
9784 ".section .fixup,\"ax\"\n" \
9785 "4: movl %3,%0\n" \
9786@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
9787 : "A" (x), "r" (addr), "i" (errret), "0" (err))
9788
9789 #define __put_user_asm_ex_u64(x, addr) \
9790- asm volatile("1: movl %%eax,0(%1)\n" \
9791- "2: movl %%edx,4(%1)\n" \
9792+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
9793+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
9794 "3:\n" \
9795 _ASM_EXTABLE(1b, 2b - 1b) \
9796 _ASM_EXTABLE(2b, 3b - 2b) \
9797@@ -373,7 +415,7 @@ do { \
9798 } while (0)
9799
9800 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9801- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
9802+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
9803 "2:\n" \
9804 ".section .fixup,\"ax\"\n" \
9805 "3: mov %3,%0\n" \
9806@@ -381,7 +423,7 @@ do { \
9807 " jmp 2b\n" \
9808 ".previous\n" \
9809 _ASM_EXTABLE(1b, 3b) \
9810- : "=r" (err), ltype(x) \
9811+ : "=r" (err), ltype (x) \
9812 : "m" (__m(addr)), "i" (errret), "0" (err))
9813
9814 #define __get_user_size_ex(x, ptr, size) \
9815@@ -406,7 +448,7 @@ do { \
9816 } while (0)
9817
9818 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
9819- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
9820+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
9821 "2:\n" \
9822 _ASM_EXTABLE(1b, 2b - 1b) \
9823 : ltype(x) : "m" (__m(addr)))
9824@@ -423,13 +465,24 @@ do { \
9825 int __gu_err; \
9826 unsigned long __gu_val; \
9827 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
9828- (x) = (__force __typeof__(*(ptr)))__gu_val; \
9829+ (x) = (__typeof__(*(ptr)))__gu_val; \
9830 __gu_err; \
9831 })
9832
9833 /* FIXME: this hack is definitely wrong -AK */
9834 struct __large_struct { unsigned long buf[100]; };
9835-#define __m(x) (*(struct __large_struct __user *)(x))
9836+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9837+#define ____m(x) \
9838+({ \
9839+ unsigned long ____x = (unsigned long)(x); \
9840+ if (____x < PAX_USER_SHADOW_BASE) \
9841+ ____x += PAX_USER_SHADOW_BASE; \
9842+ (void __user *)____x; \
9843+})
9844+#else
9845+#define ____m(x) (x)
9846+#endif
9847+#define __m(x) (*(struct __large_struct __user *)____m(x))
9848
9849 /*
9850 * Tell gcc we read from memory instead of writing: this is because
9851@@ -437,7 +490,7 @@ struct __large_struct { unsigned long bu
9852 * aliasing issues.
9853 */
9854 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9855- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
9856+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
9857 "2:\n" \
9858 ".section .fixup,\"ax\"\n" \
9859 "3: mov %3,%0\n" \
9860@@ -445,10 +498,10 @@ struct __large_struct { unsigned long bu
9861 ".previous\n" \
9862 _ASM_EXTABLE(1b, 3b) \
9863 : "=r"(err) \
9864- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
9865+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
9866
9867 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
9868- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
9869+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
9870 "2:\n" \
9871 _ASM_EXTABLE(1b, 2b - 1b) \
9872 : : ltype(x), "m" (__m(addr)))
9873@@ -487,8 +540,12 @@ struct __large_struct { unsigned long bu
9874 * On error, the variable @x is set to zero.
9875 */
9876
9877+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9878+#define __get_user(x, ptr) get_user((x), (ptr))
9879+#else
9880 #define __get_user(x, ptr) \
9881 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
9882+#endif
9883
9884 /**
9885 * __put_user: - Write a simple value into user space, with less checking.
9886@@ -510,8 +567,12 @@ struct __large_struct { unsigned long bu
9887 * Returns zero on success, or -EFAULT on error.
9888 */
9889
9890+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9891+#define __put_user(x, ptr) put_user((x), (ptr))
9892+#else
9893 #define __put_user(x, ptr) \
9894 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
9895+#endif
9896
9897 #define __get_user_unaligned __get_user
9898 #define __put_user_unaligned __put_user
9899@@ -529,7 +590,7 @@ struct __large_struct { unsigned long bu
9900 #define get_user_ex(x, ptr) do { \
9901 unsigned long __gue_val; \
9902 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
9903- (x) = (__force __typeof__(*(ptr)))__gue_val; \
9904+ (x) = (__typeof__(*(ptr)))__gue_val; \
9905 } while (0)
9906
9907 #ifdef CONFIG_X86_WP_WORKS_OK
9908diff -urNp linux-3.0.4/arch/x86/include/asm/x86_init.h linux-3.0.4/arch/x86/include/asm/x86_init.h
9909--- linux-3.0.4/arch/x86/include/asm/x86_init.h 2011-07-21 22:17:23.000000000 -0400
9910+++ linux-3.0.4/arch/x86/include/asm/x86_init.h 2011-08-23 21:47:55.000000000 -0400
9911@@ -28,7 +28,7 @@ struct x86_init_mpparse {
9912 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
9913 void (*find_smp_config)(void);
9914 void (*get_smp_config)(unsigned int early);
9915-};
9916+} __no_const;
9917
9918 /**
9919 * struct x86_init_resources - platform specific resource related ops
9920@@ -42,7 +42,7 @@ struct x86_init_resources {
9921 void (*probe_roms)(void);
9922 void (*reserve_resources)(void);
9923 char *(*memory_setup)(void);
9924-};
9925+} __no_const;
9926
9927 /**
9928 * struct x86_init_irqs - platform specific interrupt setup
9929@@ -55,7 +55,7 @@ struct x86_init_irqs {
9930 void (*pre_vector_init)(void);
9931 void (*intr_init)(void);
9932 void (*trap_init)(void);
9933-};
9934+} __no_const;
9935
9936 /**
9937 * struct x86_init_oem - oem platform specific customizing functions
9938@@ -65,7 +65,7 @@ struct x86_init_irqs {
9939 struct x86_init_oem {
9940 void (*arch_setup)(void);
9941 void (*banner)(void);
9942-};
9943+} __no_const;
9944
9945 /**
9946 * struct x86_init_mapping - platform specific initial kernel pagetable setup
9947@@ -76,7 +76,7 @@ struct x86_init_oem {
9948 */
9949 struct x86_init_mapping {
9950 void (*pagetable_reserve)(u64 start, u64 end);
9951-};
9952+} __no_const;
9953
9954 /**
9955 * struct x86_init_paging - platform specific paging functions
9956@@ -86,7 +86,7 @@ struct x86_init_mapping {
9957 struct x86_init_paging {
9958 void (*pagetable_setup_start)(pgd_t *base);
9959 void (*pagetable_setup_done)(pgd_t *base);
9960-};
9961+} __no_const;
9962
9963 /**
9964 * struct x86_init_timers - platform specific timer setup
9965@@ -101,7 +101,7 @@ struct x86_init_timers {
9966 void (*tsc_pre_init)(void);
9967 void (*timer_init)(void);
9968 void (*wallclock_init)(void);
9969-};
9970+} __no_const;
9971
9972 /**
9973 * struct x86_init_iommu - platform specific iommu setup
9974@@ -109,7 +109,7 @@ struct x86_init_timers {
9975 */
9976 struct x86_init_iommu {
9977 int (*iommu_init)(void);
9978-};
9979+} __no_const;
9980
9981 /**
9982 * struct x86_init_pci - platform specific pci init functions
9983@@ -123,7 +123,7 @@ struct x86_init_pci {
9984 int (*init)(void);
9985 void (*init_irq)(void);
9986 void (*fixup_irqs)(void);
9987-};
9988+} __no_const;
9989
9990 /**
9991 * struct x86_init_ops - functions for platform specific setup
9992@@ -139,7 +139,7 @@ struct x86_init_ops {
9993 struct x86_init_timers timers;
9994 struct x86_init_iommu iommu;
9995 struct x86_init_pci pci;
9996-};
9997+} __no_const;
9998
9999 /**
10000 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
10001@@ -147,7 +147,7 @@ struct x86_init_ops {
10002 */
10003 struct x86_cpuinit_ops {
10004 void (*setup_percpu_clockev)(void);
10005-};
10006+} __no_const;
10007
10008 /**
10009 * struct x86_platform_ops - platform specific runtime functions
10010@@ -166,7 +166,7 @@ struct x86_platform_ops {
10011 bool (*is_untracked_pat_range)(u64 start, u64 end);
10012 void (*nmi_init)(void);
10013 int (*i8042_detect)(void);
10014-};
10015+} __no_const;
10016
10017 struct pci_dev;
10018
10019@@ -174,7 +174,7 @@ struct x86_msi_ops {
10020 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
10021 void (*teardown_msi_irq)(unsigned int irq);
10022 void (*teardown_msi_irqs)(struct pci_dev *dev);
10023-};
10024+} __no_const;
10025
10026 extern struct x86_init_ops x86_init;
10027 extern struct x86_cpuinit_ops x86_cpuinit;
10028diff -urNp linux-3.0.4/arch/x86/include/asm/xsave.h linux-3.0.4/arch/x86/include/asm/xsave.h
10029--- linux-3.0.4/arch/x86/include/asm/xsave.h 2011-07-21 22:17:23.000000000 -0400
10030+++ linux-3.0.4/arch/x86/include/asm/xsave.h 2011-08-23 21:47:55.000000000 -0400
10031@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav
10032 {
10033 int err;
10034
10035+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10036+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10037+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10038+#endif
10039+
10040 /*
10041 * Clear the xsave header first, so that reserved fields are
10042 * initialized to zero.
10043@@ -100,6 +105,11 @@ static inline int xrestore_user(struct x
10044 u32 lmask = mask;
10045 u32 hmask = mask >> 32;
10046
10047+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10048+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10049+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10050+#endif
10051+
10052 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10053 "2:\n"
10054 ".section .fixup,\"ax\"\n"
10055diff -urNp linux-3.0.4/arch/x86/Kconfig linux-3.0.4/arch/x86/Kconfig
10056--- linux-3.0.4/arch/x86/Kconfig 2011-07-21 22:17:23.000000000 -0400
10057+++ linux-3.0.4/arch/x86/Kconfig 2011-09-17 00:58:36.000000000 -0400
10058@@ -229,7 +229,7 @@ config X86_HT
10059
10060 config X86_32_LAZY_GS
10061 def_bool y
10062- depends on X86_32 && !CC_STACKPROTECTOR
10063+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10064
10065 config ARCH_HWEIGHT_CFLAGS
10066 string
10067@@ -1018,7 +1018,7 @@ choice
10068
10069 config NOHIGHMEM
10070 bool "off"
10071- depends on !X86_NUMAQ
10072+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10073 ---help---
10074 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10075 However, the address space of 32-bit x86 processors is only 4
10076@@ -1055,7 +1055,7 @@ config NOHIGHMEM
10077
10078 config HIGHMEM4G
10079 bool "4GB"
10080- depends on !X86_NUMAQ
10081+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10082 ---help---
10083 Select this if you have a 32-bit processor and between 1 and 4
10084 gigabytes of physical RAM.
10085@@ -1109,7 +1109,7 @@ config PAGE_OFFSET
10086 hex
10087 default 0xB0000000 if VMSPLIT_3G_OPT
10088 default 0x80000000 if VMSPLIT_2G
10089- default 0x78000000 if VMSPLIT_2G_OPT
10090+ default 0x70000000 if VMSPLIT_2G_OPT
10091 default 0x40000000 if VMSPLIT_1G
10092 default 0xC0000000
10093 depends on X86_32
10094@@ -1483,6 +1483,7 @@ config SECCOMP
10095
10096 config CC_STACKPROTECTOR
10097 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
10098+ depends on X86_64 || !PAX_MEMORY_UDEREF
10099 ---help---
10100 This option turns on the -fstack-protector GCC feature. This
10101 feature puts, at the beginning of functions, a canary value on
10102@@ -1540,6 +1541,7 @@ config KEXEC_JUMP
10103 config PHYSICAL_START
10104 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
10105 default "0x1000000"
10106+ range 0x400000 0x40000000
10107 ---help---
10108 This gives the physical address where the kernel is loaded.
10109
10110@@ -1603,6 +1605,7 @@ config X86_NEED_RELOCS
10111 config PHYSICAL_ALIGN
10112 hex "Alignment value to which kernel should be aligned" if X86_32
10113 default "0x1000000"
10114+ range 0x400000 0x1000000 if PAX_KERNEXEC
10115 range 0x2000 0x1000000
10116 ---help---
10117 This value puts the alignment restrictions on physical address
10118@@ -1634,9 +1637,10 @@ config HOTPLUG_CPU
10119 Say N if you want to disable CPU hotplug.
10120
10121 config COMPAT_VDSO
10122- def_bool y
10123+ def_bool n
10124 prompt "Compat VDSO support"
10125 depends on X86_32 || IA32_EMULATION
10126+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
10127 ---help---
10128 Map the 32-bit VDSO to the predictable old-style address too.
10129
10130diff -urNp linux-3.0.4/arch/x86/Kconfig.cpu linux-3.0.4/arch/x86/Kconfig.cpu
10131--- linux-3.0.4/arch/x86/Kconfig.cpu 2011-07-21 22:17:23.000000000 -0400
10132+++ linux-3.0.4/arch/x86/Kconfig.cpu 2011-08-23 21:47:55.000000000 -0400
10133@@ -338,7 +338,7 @@ config X86_PPRO_FENCE
10134
10135 config X86_F00F_BUG
10136 def_bool y
10137- depends on M586MMX || M586TSC || M586 || M486 || M386
10138+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
10139
10140 config X86_INVD_BUG
10141 def_bool y
10142@@ -362,7 +362,7 @@ config X86_POPAD_OK
10143
10144 config X86_ALIGNMENT_16
10145 def_bool y
10146- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10147+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10148
10149 config X86_INTEL_USERCOPY
10150 def_bool y
10151@@ -408,7 +408,7 @@ config X86_CMPXCHG64
10152 # generates cmov.
10153 config X86_CMOV
10154 def_bool y
10155- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10156+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10157
10158 config X86_MINIMUM_CPU_FAMILY
10159 int
10160diff -urNp linux-3.0.4/arch/x86/Kconfig.debug linux-3.0.4/arch/x86/Kconfig.debug
10161--- linux-3.0.4/arch/x86/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
10162+++ linux-3.0.4/arch/x86/Kconfig.debug 2011-08-23 21:47:55.000000000 -0400
10163@@ -81,7 +81,7 @@ config X86_PTDUMP
10164 config DEBUG_RODATA
10165 bool "Write protect kernel read-only data structures"
10166 default y
10167- depends on DEBUG_KERNEL
10168+ depends on DEBUG_KERNEL && BROKEN
10169 ---help---
10170 Mark the kernel read-only data as write-protected in the pagetables,
10171 in order to catch accidental (and incorrect) writes to such const
10172@@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
10173
10174 config DEBUG_SET_MODULE_RONX
10175 bool "Set loadable kernel module data as NX and text as RO"
10176- depends on MODULES
10177+ depends on MODULES && BROKEN
10178 ---help---
10179 This option helps catch unintended modifications to loadable
10180 kernel module's text and read-only data. It also prevents execution
10181diff -urNp linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile
10182--- linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile 2011-07-21 22:17:23.000000000 -0400
10183+++ linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile 2011-08-23 21:47:55.000000000 -0400
10184@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
10185 $(call cc-option, -fno-stack-protector) \
10186 $(call cc-option, -mpreferred-stack-boundary=2)
10187 KBUILD_CFLAGS += $(call cc-option, -m32)
10188+ifdef CONSTIFY_PLUGIN
10189+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
10190+endif
10191 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10192 GCOV_PROFILE := n
10193
10194diff -urNp linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S
10195--- linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-21 22:17:23.000000000 -0400
10196+++ linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S 2011-08-23 21:48:14.000000000 -0400
10197@@ -108,6 +108,9 @@ wakeup_code:
10198 /* Do any other stuff... */
10199
10200 #ifndef CONFIG_64BIT
10201+ /* Recheck NX bit overrides (64bit path does this in trampoline */
10202+ call verify_cpu
10203+
10204 /* This could also be done in C code... */
10205 movl pmode_cr3, %eax
10206 movl %eax, %cr3
10207@@ -131,6 +134,7 @@ wakeup_code:
10208 movl pmode_cr0, %eax
10209 movl %eax, %cr0
10210 jmp pmode_return
10211+# include "../../verify_cpu.S"
10212 #else
10213 pushw $0
10214 pushw trampoline_segment
10215diff -urNp linux-3.0.4/arch/x86/kernel/acpi/sleep.c linux-3.0.4/arch/x86/kernel/acpi/sleep.c
10216--- linux-3.0.4/arch/x86/kernel/acpi/sleep.c 2011-07-21 22:17:23.000000000 -0400
10217+++ linux-3.0.4/arch/x86/kernel/acpi/sleep.c 2011-08-23 21:47:55.000000000 -0400
10218@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
10219 header->trampoline_segment = trampoline_address() >> 4;
10220 #ifdef CONFIG_SMP
10221 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
10222+
10223+ pax_open_kernel();
10224 early_gdt_descr.address =
10225 (unsigned long)get_cpu_gdt_table(smp_processor_id());
10226+ pax_close_kernel();
10227+
10228 initial_gs = per_cpu_offset(smp_processor_id());
10229 #endif
10230 initial_code = (unsigned long)wakeup_long64;
10231diff -urNp linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S
10232--- linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S 2011-07-21 22:17:23.000000000 -0400
10233+++ linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S 2011-08-23 21:47:55.000000000 -0400
10234@@ -30,13 +30,11 @@ wakeup_pmode_return:
10235 # and restore the stack ... but you need gdt for this to work
10236 movl saved_context_esp, %esp
10237
10238- movl %cs:saved_magic, %eax
10239- cmpl $0x12345678, %eax
10240+ cmpl $0x12345678, saved_magic
10241 jne bogus_magic
10242
10243 # jump to place where we left off
10244- movl saved_eip, %eax
10245- jmp *%eax
10246+ jmp *(saved_eip)
10247
10248 bogus_magic:
10249 jmp bogus_magic
10250diff -urNp linux-3.0.4/arch/x86/kernel/alternative.c linux-3.0.4/arch/x86/kernel/alternative.c
10251--- linux-3.0.4/arch/x86/kernel/alternative.c 2011-07-21 22:17:23.000000000 -0400
10252+++ linux-3.0.4/arch/x86/kernel/alternative.c 2011-08-23 21:47:55.000000000 -0400
10253@@ -313,7 +313,7 @@ static void alternatives_smp_lock(const
10254 if (!*poff || ptr < text || ptr >= text_end)
10255 continue;
10256 /* turn DS segment override prefix into lock prefix */
10257- if (*ptr == 0x3e)
10258+ if (*ktla_ktva(ptr) == 0x3e)
10259 text_poke(ptr, ((unsigned char []){0xf0}), 1);
10260 };
10261 mutex_unlock(&text_mutex);
10262@@ -334,7 +334,7 @@ static void alternatives_smp_unlock(cons
10263 if (!*poff || ptr < text || ptr >= text_end)
10264 continue;
10265 /* turn lock prefix into DS segment override prefix */
10266- if (*ptr == 0xf0)
10267+ if (*ktla_ktva(ptr) == 0xf0)
10268 text_poke(ptr, ((unsigned char []){0x3E}), 1);
10269 };
10270 mutex_unlock(&text_mutex);
10271@@ -503,7 +503,7 @@ void __init_or_module apply_paravirt(str
10272
10273 BUG_ON(p->len > MAX_PATCH_LEN);
10274 /* prep the buffer with the original instructions */
10275- memcpy(insnbuf, p->instr, p->len);
10276+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
10277 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
10278 (unsigned long)p->instr, p->len);
10279
10280@@ -571,7 +571,7 @@ void __init alternative_instructions(voi
10281 if (smp_alt_once)
10282 free_init_pages("SMP alternatives",
10283 (unsigned long)__smp_locks,
10284- (unsigned long)__smp_locks_end);
10285+ PAGE_ALIGN((unsigned long)__smp_locks_end));
10286
10287 restart_nmi();
10288 }
10289@@ -588,13 +588,17 @@ void __init alternative_instructions(voi
10290 * instructions. And on the local CPU you need to be protected again NMI or MCE
10291 * handlers seeing an inconsistent instruction while you patch.
10292 */
10293-void *__init_or_module text_poke_early(void *addr, const void *opcode,
10294+void *__kprobes text_poke_early(void *addr, const void *opcode,
10295 size_t len)
10296 {
10297 unsigned long flags;
10298 local_irq_save(flags);
10299- memcpy(addr, opcode, len);
10300+
10301+ pax_open_kernel();
10302+ memcpy(ktla_ktva(addr), opcode, len);
10303 sync_core();
10304+ pax_close_kernel();
10305+
10306 local_irq_restore(flags);
10307 /* Could also do a CLFLUSH here to speed up CPU recovery; but
10308 that causes hangs on some VIA CPUs. */
10309@@ -616,36 +620,22 @@ void *__init_or_module text_poke_early(v
10310 */
10311 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
10312 {
10313- unsigned long flags;
10314- char *vaddr;
10315+ unsigned char *vaddr = ktla_ktva(addr);
10316 struct page *pages[2];
10317- int i;
10318+ size_t i;
10319
10320 if (!core_kernel_text((unsigned long)addr)) {
10321- pages[0] = vmalloc_to_page(addr);
10322- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
10323+ pages[0] = vmalloc_to_page(vaddr);
10324+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
10325 } else {
10326- pages[0] = virt_to_page(addr);
10327+ pages[0] = virt_to_page(vaddr);
10328 WARN_ON(!PageReserved(pages[0]));
10329- pages[1] = virt_to_page(addr + PAGE_SIZE);
10330+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
10331 }
10332 BUG_ON(!pages[0]);
10333- local_irq_save(flags);
10334- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
10335- if (pages[1])
10336- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
10337- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
10338- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
10339- clear_fixmap(FIX_TEXT_POKE0);
10340- if (pages[1])
10341- clear_fixmap(FIX_TEXT_POKE1);
10342- local_flush_tlb();
10343- sync_core();
10344- /* Could also do a CLFLUSH here to speed up CPU recovery; but
10345- that causes hangs on some VIA CPUs. */
10346+ text_poke_early(addr, opcode, len);
10347 for (i = 0; i < len; i++)
10348- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
10349- local_irq_restore(flags);
10350+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
10351 return addr;
10352 }
10353
10354diff -urNp linux-3.0.4/arch/x86/kernel/apic/apic.c linux-3.0.4/arch/x86/kernel/apic/apic.c
10355--- linux-3.0.4/arch/x86/kernel/apic/apic.c 2011-07-21 22:17:23.000000000 -0400
10356+++ linux-3.0.4/arch/x86/kernel/apic/apic.c 2011-08-23 21:48:14.000000000 -0400
10357@@ -173,7 +173,7 @@ int first_system_vector = 0xfe;
10358 /*
10359 * Debug level, exported for io_apic.c
10360 */
10361-unsigned int apic_verbosity;
10362+int apic_verbosity;
10363
10364 int pic_mode;
10365
10366@@ -1834,7 +1834,7 @@ void smp_error_interrupt(struct pt_regs
10367 apic_write(APIC_ESR, 0);
10368 v1 = apic_read(APIC_ESR);
10369 ack_APIC_irq();
10370- atomic_inc(&irq_err_count);
10371+ atomic_inc_unchecked(&irq_err_count);
10372
10373 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
10374 smp_processor_id(), v0 , v1);
10375@@ -2190,6 +2190,8 @@ static int __cpuinit apic_cluster_num(vo
10376 u16 *bios_cpu_apicid;
10377 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
10378
10379+ pax_track_stack();
10380+
10381 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
10382 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
10383
10384diff -urNp linux-3.0.4/arch/x86/kernel/apic/io_apic.c linux-3.0.4/arch/x86/kernel/apic/io_apic.c
10385--- linux-3.0.4/arch/x86/kernel/apic/io_apic.c 2011-07-21 22:17:23.000000000 -0400
10386+++ linux-3.0.4/arch/x86/kernel/apic/io_apic.c 2011-08-23 21:47:55.000000000 -0400
10387@@ -1028,7 +1028,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
10388 }
10389 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
10390
10391-void lock_vector_lock(void)
10392+void lock_vector_lock(void) __acquires(vector_lock)
10393 {
10394 /* Used to the online set of cpus does not change
10395 * during assign_irq_vector.
10396@@ -1036,7 +1036,7 @@ void lock_vector_lock(void)
10397 raw_spin_lock(&vector_lock);
10398 }
10399
10400-void unlock_vector_lock(void)
10401+void unlock_vector_lock(void) __releases(vector_lock)
10402 {
10403 raw_spin_unlock(&vector_lock);
10404 }
10405@@ -2364,7 +2364,7 @@ static void ack_apic_edge(struct irq_dat
10406 ack_APIC_irq();
10407 }
10408
10409-atomic_t irq_mis_count;
10410+atomic_unchecked_t irq_mis_count;
10411
10412 /*
10413 * IO-APIC versions below 0x20 don't support EOI register.
10414@@ -2472,7 +2472,7 @@ static void ack_apic_level(struct irq_da
10415 * at the cpu.
10416 */
10417 if (!(v & (1 << (i & 0x1f)))) {
10418- atomic_inc(&irq_mis_count);
10419+ atomic_inc_unchecked(&irq_mis_count);
10420
10421 eoi_ioapic_irq(irq, cfg);
10422 }
10423diff -urNp linux-3.0.4/arch/x86/kernel/apm_32.c linux-3.0.4/arch/x86/kernel/apm_32.c
10424--- linux-3.0.4/arch/x86/kernel/apm_32.c 2011-07-21 22:17:23.000000000 -0400
10425+++ linux-3.0.4/arch/x86/kernel/apm_32.c 2011-08-23 21:47:55.000000000 -0400
10426@@ -413,7 +413,7 @@ static DEFINE_MUTEX(apm_mutex);
10427 * This is for buggy BIOS's that refer to (real mode) segment 0x40
10428 * even though they are called in protected mode.
10429 */
10430-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
10431+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
10432 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
10433
10434 static const char driver_version[] = "1.16ac"; /* no spaces */
10435@@ -591,7 +591,10 @@ static long __apm_bios_call(void *_call)
10436 BUG_ON(cpu != 0);
10437 gdt = get_cpu_gdt_table(cpu);
10438 save_desc_40 = gdt[0x40 / 8];
10439+
10440+ pax_open_kernel();
10441 gdt[0x40 / 8] = bad_bios_desc;
10442+ pax_close_kernel();
10443
10444 apm_irq_save(flags);
10445 APM_DO_SAVE_SEGS;
10446@@ -600,7 +603,11 @@ static long __apm_bios_call(void *_call)
10447 &call->esi);
10448 APM_DO_RESTORE_SEGS;
10449 apm_irq_restore(flags);
10450+
10451+ pax_open_kernel();
10452 gdt[0x40 / 8] = save_desc_40;
10453+ pax_close_kernel();
10454+
10455 put_cpu();
10456
10457 return call->eax & 0xff;
10458@@ -667,7 +674,10 @@ static long __apm_bios_call_simple(void
10459 BUG_ON(cpu != 0);
10460 gdt = get_cpu_gdt_table(cpu);
10461 save_desc_40 = gdt[0x40 / 8];
10462+
10463+ pax_open_kernel();
10464 gdt[0x40 / 8] = bad_bios_desc;
10465+ pax_close_kernel();
10466
10467 apm_irq_save(flags);
10468 APM_DO_SAVE_SEGS;
10469@@ -675,7 +685,11 @@ static long __apm_bios_call_simple(void
10470 &call->eax);
10471 APM_DO_RESTORE_SEGS;
10472 apm_irq_restore(flags);
10473+
10474+ pax_open_kernel();
10475 gdt[0x40 / 8] = save_desc_40;
10476+ pax_close_kernel();
10477+
10478 put_cpu();
10479 return error;
10480 }
10481@@ -2349,12 +2363,15 @@ static int __init apm_init(void)
10482 * code to that CPU.
10483 */
10484 gdt = get_cpu_gdt_table(0);
10485+
10486+ pax_open_kernel();
10487 set_desc_base(&gdt[APM_CS >> 3],
10488 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
10489 set_desc_base(&gdt[APM_CS_16 >> 3],
10490 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
10491 set_desc_base(&gdt[APM_DS >> 3],
10492 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
10493+ pax_close_kernel();
10494
10495 proc_create("apm", 0, NULL, &apm_file_ops);
10496
10497diff -urNp linux-3.0.4/arch/x86/kernel/asm-offsets_64.c linux-3.0.4/arch/x86/kernel/asm-offsets_64.c
10498--- linux-3.0.4/arch/x86/kernel/asm-offsets_64.c 2011-07-21 22:17:23.000000000 -0400
10499+++ linux-3.0.4/arch/x86/kernel/asm-offsets_64.c 2011-08-23 21:47:55.000000000 -0400
10500@@ -69,6 +69,7 @@ int main(void)
10501 BLANK();
10502 #undef ENTRY
10503
10504+ DEFINE(TSS_size, sizeof(struct tss_struct));
10505 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
10506 BLANK();
10507
10508diff -urNp linux-3.0.4/arch/x86/kernel/asm-offsets.c linux-3.0.4/arch/x86/kernel/asm-offsets.c
10509--- linux-3.0.4/arch/x86/kernel/asm-offsets.c 2011-07-21 22:17:23.000000000 -0400
10510+++ linux-3.0.4/arch/x86/kernel/asm-offsets.c 2011-08-23 21:47:55.000000000 -0400
10511@@ -33,6 +33,8 @@ void common(void) {
10512 OFFSET(TI_status, thread_info, status);
10513 OFFSET(TI_addr_limit, thread_info, addr_limit);
10514 OFFSET(TI_preempt_count, thread_info, preempt_count);
10515+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
10516+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
10517
10518 BLANK();
10519 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
10520@@ -53,8 +55,26 @@ void common(void) {
10521 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
10522 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
10523 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
10524+
10525+#ifdef CONFIG_PAX_KERNEXEC
10526+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
10527+#endif
10528+
10529+#ifdef CONFIG_PAX_MEMORY_UDEREF
10530+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
10531+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
10532+#ifdef CONFIG_X86_64
10533+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
10534+#endif
10535 #endif
10536
10537+#endif
10538+
10539+ BLANK();
10540+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
10541+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
10542+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
10543+
10544 #ifdef CONFIG_XEN
10545 BLANK();
10546 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
10547diff -urNp linux-3.0.4/arch/x86/kernel/cpu/amd.c linux-3.0.4/arch/x86/kernel/cpu/amd.c
10548--- linux-3.0.4/arch/x86/kernel/cpu/amd.c 2011-07-21 22:17:23.000000000 -0400
10549+++ linux-3.0.4/arch/x86/kernel/cpu/amd.c 2011-08-23 21:47:55.000000000 -0400
10550@@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_c
10551 unsigned int size)
10552 {
10553 /* AMD errata T13 (order #21922) */
10554- if ((c->x86 == 6)) {
10555+ if (c->x86 == 6) {
10556 /* Duron Rev A0 */
10557 if (c->x86_model == 3 && c->x86_mask == 0)
10558 size = 64;
10559diff -urNp linux-3.0.4/arch/x86/kernel/cpu/common.c linux-3.0.4/arch/x86/kernel/cpu/common.c
10560--- linux-3.0.4/arch/x86/kernel/cpu/common.c 2011-07-21 22:17:23.000000000 -0400
10561+++ linux-3.0.4/arch/x86/kernel/cpu/common.c 2011-08-23 21:47:55.000000000 -0400
10562@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
10563
10564 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
10565
10566-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
10567-#ifdef CONFIG_X86_64
10568- /*
10569- * We need valid kernel segments for data and code in long mode too
10570- * IRET will check the segment types kkeil 2000/10/28
10571- * Also sysret mandates a special GDT layout
10572- *
10573- * TLS descriptors are currently at a different place compared to i386.
10574- * Hopefully nobody expects them at a fixed place (Wine?)
10575- */
10576- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
10577- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
10578- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
10579- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
10580- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
10581- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
10582-#else
10583- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
10584- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10585- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
10586- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
10587- /*
10588- * Segments used for calling PnP BIOS have byte granularity.
10589- * They code segments and data segments have fixed 64k limits,
10590- * the transfer segment sizes are set at run time.
10591- */
10592- /* 32-bit code */
10593- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10594- /* 16-bit code */
10595- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10596- /* 16-bit data */
10597- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
10598- /* 16-bit data */
10599- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
10600- /* 16-bit data */
10601- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
10602- /*
10603- * The APM segments have byte granularity and their bases
10604- * are set at run time. All have 64k limits.
10605- */
10606- /* 32-bit code */
10607- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10608- /* 16-bit code */
10609- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10610- /* data */
10611- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
10612-
10613- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10614- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10615- GDT_STACK_CANARY_INIT
10616-#endif
10617-} };
10618-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
10619-
10620 static int __init x86_xsave_setup(char *s)
10621 {
10622 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
10623@@ -371,7 +317,7 @@ void switch_to_new_gdt(int cpu)
10624 {
10625 struct desc_ptr gdt_descr;
10626
10627- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
10628+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
10629 gdt_descr.size = GDT_SIZE - 1;
10630 load_gdt(&gdt_descr);
10631 /* Reload the per-cpu base */
10632@@ -840,6 +786,10 @@ static void __cpuinit identify_cpu(struc
10633 /* Filter out anything that depends on CPUID levels we don't have */
10634 filter_cpuid_features(c, true);
10635
10636+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
10637+ setup_clear_cpu_cap(X86_FEATURE_SEP);
10638+#endif
10639+
10640 /* If the model name is still unset, do table lookup. */
10641 if (!c->x86_model_id[0]) {
10642 const char *p;
10643@@ -1019,6 +969,9 @@ static __init int setup_disablecpuid(cha
10644 }
10645 __setup("clearcpuid=", setup_disablecpuid);
10646
10647+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
10648+EXPORT_PER_CPU_SYMBOL(current_tinfo);
10649+
10650 #ifdef CONFIG_X86_64
10651 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
10652
10653@@ -1034,7 +987,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
10654 EXPORT_PER_CPU_SYMBOL(current_task);
10655
10656 DEFINE_PER_CPU(unsigned long, kernel_stack) =
10657- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
10658+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
10659 EXPORT_PER_CPU_SYMBOL(kernel_stack);
10660
10661 DEFINE_PER_CPU(char *, irq_stack_ptr) =
10662@@ -1099,7 +1052,7 @@ struct pt_regs * __cpuinit idle_regs(str
10663 {
10664 memset(regs, 0, sizeof(struct pt_regs));
10665 regs->fs = __KERNEL_PERCPU;
10666- regs->gs = __KERNEL_STACK_CANARY;
10667+ savesegment(gs, regs->gs);
10668
10669 return regs;
10670 }
10671@@ -1154,7 +1107,7 @@ void __cpuinit cpu_init(void)
10672 int i;
10673
10674 cpu = stack_smp_processor_id();
10675- t = &per_cpu(init_tss, cpu);
10676+ t = init_tss + cpu;
10677 oist = &per_cpu(orig_ist, cpu);
10678
10679 #ifdef CONFIG_NUMA
10680@@ -1180,7 +1133,7 @@ void __cpuinit cpu_init(void)
10681 switch_to_new_gdt(cpu);
10682 loadsegment(fs, 0);
10683
10684- load_idt((const struct desc_ptr *)&idt_descr);
10685+ load_idt(&idt_descr);
10686
10687 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
10688 syscall_init();
10689@@ -1189,7 +1142,6 @@ void __cpuinit cpu_init(void)
10690 wrmsrl(MSR_KERNEL_GS_BASE, 0);
10691 barrier();
10692
10693- x86_configure_nx();
10694 if (cpu != 0)
10695 enable_x2apic();
10696
10697@@ -1243,7 +1195,7 @@ void __cpuinit cpu_init(void)
10698 {
10699 int cpu = smp_processor_id();
10700 struct task_struct *curr = current;
10701- struct tss_struct *t = &per_cpu(init_tss, cpu);
10702+ struct tss_struct *t = init_tss + cpu;
10703 struct thread_struct *thread = &curr->thread;
10704
10705 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
10706diff -urNp linux-3.0.4/arch/x86/kernel/cpu/intel.c linux-3.0.4/arch/x86/kernel/cpu/intel.c
10707--- linux-3.0.4/arch/x86/kernel/cpu/intel.c 2011-09-02 18:11:26.000000000 -0400
10708+++ linux-3.0.4/arch/x86/kernel/cpu/intel.c 2011-08-29 23:30:14.000000000 -0400
10709@@ -172,7 +172,7 @@ static void __cpuinit trap_init_f00f_bug
10710 * Update the IDT descriptor and reload the IDT so that
10711 * it uses the read-only mapped virtual address.
10712 */
10713- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
10714+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
10715 load_idt(&idt_descr);
10716 }
10717 #endif
10718diff -urNp linux-3.0.4/arch/x86/kernel/cpu/Makefile linux-3.0.4/arch/x86/kernel/cpu/Makefile
10719--- linux-3.0.4/arch/x86/kernel/cpu/Makefile 2011-07-21 22:17:23.000000000 -0400
10720+++ linux-3.0.4/arch/x86/kernel/cpu/Makefile 2011-08-23 21:47:55.000000000 -0400
10721@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
10722 CFLAGS_REMOVE_perf_event.o = -pg
10723 endif
10724
10725-# Make sure load_percpu_segment has no stackprotector
10726-nostackp := $(call cc-option, -fno-stack-protector)
10727-CFLAGS_common.o := $(nostackp)
10728-
10729 obj-y := intel_cacheinfo.o scattered.o topology.o
10730 obj-y += proc.o capflags.o powerflags.o common.o
10731 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
10732diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c
10733--- linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c 2011-07-21 22:17:23.000000000 -0400
10734+++ linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c 2011-08-23 21:47:55.000000000 -0400
10735@@ -46,6 +46,7 @@
10736 #include <asm/ipi.h>
10737 #include <asm/mce.h>
10738 #include <asm/msr.h>
10739+#include <asm/local.h>
10740
10741 #include "mce-internal.h"
10742
10743@@ -208,7 +209,7 @@ static void print_mce(struct mce *m)
10744 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
10745 m->cs, m->ip);
10746
10747- if (m->cs == __KERNEL_CS)
10748+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
10749 print_symbol("{%s}", m->ip);
10750 pr_cont("\n");
10751 }
10752@@ -236,10 +237,10 @@ static void print_mce(struct mce *m)
10753
10754 #define PANIC_TIMEOUT 5 /* 5 seconds */
10755
10756-static atomic_t mce_paniced;
10757+static atomic_unchecked_t mce_paniced;
10758
10759 static int fake_panic;
10760-static atomic_t mce_fake_paniced;
10761+static atomic_unchecked_t mce_fake_paniced;
10762
10763 /* Panic in progress. Enable interrupts and wait for final IPI */
10764 static void wait_for_panic(void)
10765@@ -263,7 +264,7 @@ static void mce_panic(char *msg, struct
10766 /*
10767 * Make sure only one CPU runs in machine check panic
10768 */
10769- if (atomic_inc_return(&mce_paniced) > 1)
10770+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
10771 wait_for_panic();
10772 barrier();
10773
10774@@ -271,7 +272,7 @@ static void mce_panic(char *msg, struct
10775 console_verbose();
10776 } else {
10777 /* Don't log too much for fake panic */
10778- if (atomic_inc_return(&mce_fake_paniced) > 1)
10779+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
10780 return;
10781 }
10782 /* First print corrected ones that are still unlogged */
10783@@ -638,7 +639,7 @@ static int mce_timed_out(u64 *t)
10784 * might have been modified by someone else.
10785 */
10786 rmb();
10787- if (atomic_read(&mce_paniced))
10788+ if (atomic_read_unchecked(&mce_paniced))
10789 wait_for_panic();
10790 if (!monarch_timeout)
10791 goto out;
10792@@ -1452,14 +1453,14 @@ void __cpuinit mcheck_cpu_init(struct cp
10793 */
10794
10795 static DEFINE_SPINLOCK(mce_state_lock);
10796-static int open_count; /* #times opened */
10797+static local_t open_count; /* #times opened */
10798 static int open_exclu; /* already open exclusive? */
10799
10800 static int mce_open(struct inode *inode, struct file *file)
10801 {
10802 spin_lock(&mce_state_lock);
10803
10804- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
10805+ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
10806 spin_unlock(&mce_state_lock);
10807
10808 return -EBUSY;
10809@@ -1467,7 +1468,7 @@ static int mce_open(struct inode *inode,
10810
10811 if (file->f_flags & O_EXCL)
10812 open_exclu = 1;
10813- open_count++;
10814+ local_inc(&open_count);
10815
10816 spin_unlock(&mce_state_lock);
10817
10818@@ -1478,7 +1479,7 @@ static int mce_release(struct inode *ino
10819 {
10820 spin_lock(&mce_state_lock);
10821
10822- open_count--;
10823+ local_dec(&open_count);
10824 open_exclu = 0;
10825
10826 spin_unlock(&mce_state_lock);
10827@@ -2163,7 +2164,7 @@ struct dentry *mce_get_debugfs_dir(void)
10828 static void mce_reset(void)
10829 {
10830 cpu_missing = 0;
10831- atomic_set(&mce_fake_paniced, 0);
10832+ atomic_set_unchecked(&mce_fake_paniced, 0);
10833 atomic_set(&mce_executing, 0);
10834 atomic_set(&mce_callin, 0);
10835 atomic_set(&global_nwo, 0);
10836diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c
10837--- linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-07-21 22:17:23.000000000 -0400
10838+++ linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-23 21:47:55.000000000 -0400
10839@@ -215,7 +215,9 @@ static int inject_init(void)
10840 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
10841 return -ENOMEM;
10842 printk(KERN_INFO "Machine check injector initialized\n");
10843- mce_chrdev_ops.write = mce_write;
10844+ pax_open_kernel();
10845+ *(void **)&mce_chrdev_ops.write = mce_write;
10846+ pax_close_kernel();
10847 register_die_notifier(&mce_raise_nb);
10848 return 0;
10849 }
10850diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c
10851--- linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c 2011-09-02 18:11:26.000000000 -0400
10852+++ linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c 2011-08-29 23:26:21.000000000 -0400
10853@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
10854 u64 size_or_mask, size_and_mask;
10855 static bool mtrr_aps_delayed_init;
10856
10857-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
10858+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
10859
10860 const struct mtrr_ops *mtrr_if;
10861
10862diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h
10863--- linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-07-21 22:17:23.000000000 -0400
10864+++ linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-08-26 19:49:56.000000000 -0400
10865@@ -25,7 +25,7 @@ struct mtrr_ops {
10866 int (*validate_add_page)(unsigned long base, unsigned long size,
10867 unsigned int type);
10868 int (*have_wrcomb)(void);
10869-};
10870+} __do_const;
10871
10872 extern int generic_get_free_region(unsigned long base, unsigned long size,
10873 int replace_reg);
10874diff -urNp linux-3.0.4/arch/x86/kernel/cpu/perf_event.c linux-3.0.4/arch/x86/kernel/cpu/perf_event.c
10875--- linux-3.0.4/arch/x86/kernel/cpu/perf_event.c 2011-07-21 22:17:23.000000000 -0400
10876+++ linux-3.0.4/arch/x86/kernel/cpu/perf_event.c 2011-08-23 21:48:14.000000000 -0400
10877@@ -781,6 +781,8 @@ static int x86_schedule_events(struct cp
10878 int i, j, w, wmax, num = 0;
10879 struct hw_perf_event *hwc;
10880
10881+ pax_track_stack();
10882+
10883 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
10884
10885 for (i = 0; i < n; i++) {
10886@@ -1872,7 +1874,7 @@ perf_callchain_user(struct perf_callchai
10887 break;
10888
10889 perf_callchain_store(entry, frame.return_address);
10890- fp = frame.next_frame;
10891+ fp = (__force const void __user *)frame.next_frame;
10892 }
10893 }
10894
10895diff -urNp linux-3.0.4/arch/x86/kernel/crash.c linux-3.0.4/arch/x86/kernel/crash.c
10896--- linux-3.0.4/arch/x86/kernel/crash.c 2011-07-21 22:17:23.000000000 -0400
10897+++ linux-3.0.4/arch/x86/kernel/crash.c 2011-08-23 21:47:55.000000000 -0400
10898@@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu,
10899 regs = args->regs;
10900
10901 #ifdef CONFIG_X86_32
10902- if (!user_mode_vm(regs)) {
10903+ if (!user_mode(regs)) {
10904 crash_fixup_ss_esp(&fixed_regs, regs);
10905 regs = &fixed_regs;
10906 }
10907diff -urNp linux-3.0.4/arch/x86/kernel/doublefault_32.c linux-3.0.4/arch/x86/kernel/doublefault_32.c
10908--- linux-3.0.4/arch/x86/kernel/doublefault_32.c 2011-07-21 22:17:23.000000000 -0400
10909+++ linux-3.0.4/arch/x86/kernel/doublefault_32.c 2011-08-23 21:47:55.000000000 -0400
10910@@ -11,7 +11,7 @@
10911
10912 #define DOUBLEFAULT_STACKSIZE (1024)
10913 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
10914-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
10915+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
10916
10917 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
10918
10919@@ -21,7 +21,7 @@ static void doublefault_fn(void)
10920 unsigned long gdt, tss;
10921
10922 store_gdt(&gdt_desc);
10923- gdt = gdt_desc.address;
10924+ gdt = (unsigned long)gdt_desc.address;
10925
10926 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
10927
10928@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
10929 /* 0x2 bit is always set */
10930 .flags = X86_EFLAGS_SF | 0x2,
10931 .sp = STACK_START,
10932- .es = __USER_DS,
10933+ .es = __KERNEL_DS,
10934 .cs = __KERNEL_CS,
10935 .ss = __KERNEL_DS,
10936- .ds = __USER_DS,
10937+ .ds = __KERNEL_DS,
10938 .fs = __KERNEL_PERCPU,
10939
10940 .__cr3 = __pa_nodebug(swapper_pg_dir),
10941diff -urNp linux-3.0.4/arch/x86/kernel/dumpstack_32.c linux-3.0.4/arch/x86/kernel/dumpstack_32.c
10942--- linux-3.0.4/arch/x86/kernel/dumpstack_32.c 2011-07-21 22:17:23.000000000 -0400
10943+++ linux-3.0.4/arch/x86/kernel/dumpstack_32.c 2011-08-23 21:47:55.000000000 -0400
10944@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task
10945 bp = stack_frame(task, regs);
10946
10947 for (;;) {
10948- struct thread_info *context;
10949+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
10950
10951- context = (struct thread_info *)
10952- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
10953- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
10954+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
10955
10956- stack = (unsigned long *)context->previous_esp;
10957- if (!stack)
10958+ if (stack_start == task_stack_page(task))
10959 break;
10960+ stack = *(unsigned long **)stack_start;
10961 if (ops->stack(data, "IRQ") < 0)
10962 break;
10963 touch_nmi_watchdog();
10964@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs
10965 * When in-kernel, we also print out the stack and code at the
10966 * time of the fault..
10967 */
10968- if (!user_mode_vm(regs)) {
10969+ if (!user_mode(regs)) {
10970 unsigned int code_prologue = code_bytes * 43 / 64;
10971 unsigned int code_len = code_bytes;
10972 unsigned char c;
10973 u8 *ip;
10974+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
10975
10976 printk(KERN_EMERG "Stack:\n");
10977 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
10978
10979 printk(KERN_EMERG "Code: ");
10980
10981- ip = (u8 *)regs->ip - code_prologue;
10982+ ip = (u8 *)regs->ip - code_prologue + cs_base;
10983 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
10984 /* try starting at IP */
10985- ip = (u8 *)regs->ip;
10986+ ip = (u8 *)regs->ip + cs_base;
10987 code_len = code_len - code_prologue + 1;
10988 }
10989 for (i = 0; i < code_len; i++, ip++) {
10990@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs
10991 printk(" Bad EIP value.");
10992 break;
10993 }
10994- if (ip == (u8 *)regs->ip)
10995+ if (ip == (u8 *)regs->ip + cs_base)
10996 printk("<%02x> ", c);
10997 else
10998 printk("%02x ", c);
10999@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
11000 {
11001 unsigned short ud2;
11002
11003+ ip = ktla_ktva(ip);
11004 if (ip < PAGE_OFFSET)
11005 return 0;
11006 if (probe_kernel_address((unsigned short *)ip, ud2))
11007diff -urNp linux-3.0.4/arch/x86/kernel/dumpstack_64.c linux-3.0.4/arch/x86/kernel/dumpstack_64.c
11008--- linux-3.0.4/arch/x86/kernel/dumpstack_64.c 2011-07-21 22:17:23.000000000 -0400
11009+++ linux-3.0.4/arch/x86/kernel/dumpstack_64.c 2011-08-23 21:47:55.000000000 -0400
11010@@ -147,9 +147,9 @@ void dump_trace(struct task_struct *task
11011 unsigned long *irq_stack_end =
11012 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
11013 unsigned used = 0;
11014- struct thread_info *tinfo;
11015 int graph = 0;
11016 unsigned long dummy;
11017+ void *stack_start;
11018
11019 if (!task)
11020 task = current;
11021@@ -167,10 +167,10 @@ void dump_trace(struct task_struct *task
11022 * current stack address. If the stacks consist of nested
11023 * exceptions
11024 */
11025- tinfo = task_thread_info(task);
11026 for (;;) {
11027 char *id;
11028 unsigned long *estack_end;
11029+
11030 estack_end = in_exception_stack(cpu, (unsigned long)stack,
11031 &used, &id);
11032
11033@@ -178,7 +178,7 @@ void dump_trace(struct task_struct *task
11034 if (ops->stack(data, id) < 0)
11035 break;
11036
11037- bp = ops->walk_stack(tinfo, stack, bp, ops,
11038+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
11039 data, estack_end, &graph);
11040 ops->stack(data, "<EOE>");
11041 /*
11042@@ -197,7 +197,7 @@ void dump_trace(struct task_struct *task
11043 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
11044 if (ops->stack(data, "IRQ") < 0)
11045 break;
11046- bp = ops->walk_stack(tinfo, stack, bp,
11047+ bp = ops->walk_stack(task, irq_stack, stack, bp,
11048 ops, data, irq_stack_end, &graph);
11049 /*
11050 * We link to the next stack (which would be
11051@@ -218,7 +218,8 @@ void dump_trace(struct task_struct *task
11052 /*
11053 * This handles the process stack:
11054 */
11055- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
11056+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11057+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11058 put_cpu();
11059 }
11060 EXPORT_SYMBOL(dump_trace);
11061diff -urNp linux-3.0.4/arch/x86/kernel/dumpstack.c linux-3.0.4/arch/x86/kernel/dumpstack.c
11062--- linux-3.0.4/arch/x86/kernel/dumpstack.c 2011-07-21 22:17:23.000000000 -0400
11063+++ linux-3.0.4/arch/x86/kernel/dumpstack.c 2011-08-23 21:48:14.000000000 -0400
11064@@ -2,6 +2,9 @@
11065 * Copyright (C) 1991, 1992 Linus Torvalds
11066 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
11067 */
11068+#ifdef CONFIG_GRKERNSEC_HIDESYM
11069+#define __INCLUDED_BY_HIDESYM 1
11070+#endif
11071 #include <linux/kallsyms.h>
11072 #include <linux/kprobes.h>
11073 #include <linux/uaccess.h>
11074@@ -35,9 +38,8 @@ void printk_address(unsigned long addres
11075 static void
11076 print_ftrace_graph_addr(unsigned long addr, void *data,
11077 const struct stacktrace_ops *ops,
11078- struct thread_info *tinfo, int *graph)
11079+ struct task_struct *task, int *graph)
11080 {
11081- struct task_struct *task = tinfo->task;
11082 unsigned long ret_addr;
11083 int index = task->curr_ret_stack;
11084
11085@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long ad
11086 static inline void
11087 print_ftrace_graph_addr(unsigned long addr, void *data,
11088 const struct stacktrace_ops *ops,
11089- struct thread_info *tinfo, int *graph)
11090+ struct task_struct *task, int *graph)
11091 { }
11092 #endif
11093
11094@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long ad
11095 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
11096 */
11097
11098-static inline int valid_stack_ptr(struct thread_info *tinfo,
11099- void *p, unsigned int size, void *end)
11100+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
11101 {
11102- void *t = tinfo;
11103 if (end) {
11104 if (p < end && p >= (end-THREAD_SIZE))
11105 return 1;
11106@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct
11107 }
11108
11109 unsigned long
11110-print_context_stack(struct thread_info *tinfo,
11111+print_context_stack(struct task_struct *task, void *stack_start,
11112 unsigned long *stack, unsigned long bp,
11113 const struct stacktrace_ops *ops, void *data,
11114 unsigned long *end, int *graph)
11115 {
11116 struct stack_frame *frame = (struct stack_frame *)bp;
11117
11118- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
11119+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
11120 unsigned long addr;
11121
11122 addr = *stack;
11123@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *
11124 } else {
11125 ops->address(data, addr, 0);
11126 }
11127- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11128+ print_ftrace_graph_addr(addr, data, ops, task, graph);
11129 }
11130 stack++;
11131 }
11132@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *
11133 EXPORT_SYMBOL_GPL(print_context_stack);
11134
11135 unsigned long
11136-print_context_stack_bp(struct thread_info *tinfo,
11137+print_context_stack_bp(struct task_struct *task, void *stack_start,
11138 unsigned long *stack, unsigned long bp,
11139 const struct stacktrace_ops *ops, void *data,
11140 unsigned long *end, int *graph)
11141@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_inf
11142 struct stack_frame *frame = (struct stack_frame *)bp;
11143 unsigned long *ret_addr = &frame->return_address;
11144
11145- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
11146+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
11147 unsigned long addr = *ret_addr;
11148
11149 if (!__kernel_text_address(addr))
11150@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_inf
11151 ops->address(data, addr, 1);
11152 frame = frame->next_frame;
11153 ret_addr = &frame->return_address;
11154- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11155+ print_ftrace_graph_addr(addr, data, ops, task, graph);
11156 }
11157
11158 return (unsigned long)frame;
11159@@ -186,7 +186,7 @@ void dump_stack(void)
11160
11161 bp = stack_frame(current, NULL);
11162 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
11163- current->pid, current->comm, print_tainted(),
11164+ task_pid_nr(current), current->comm, print_tainted(),
11165 init_utsname()->release,
11166 (int)strcspn(init_utsname()->version, " "),
11167 init_utsname()->version);
11168@@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
11169 }
11170 EXPORT_SYMBOL_GPL(oops_begin);
11171
11172+extern void gr_handle_kernel_exploit(void);
11173+
11174 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
11175 {
11176 if (regs && kexec_should_crash(current))
11177@@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long fl
11178 panic("Fatal exception in interrupt");
11179 if (panic_on_oops)
11180 panic("Fatal exception");
11181- do_exit(signr);
11182+
11183+ gr_handle_kernel_exploit();
11184+
11185+ do_group_exit(signr);
11186 }
11187
11188 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
11189@@ -269,7 +274,7 @@ int __kprobes __die(const char *str, str
11190
11191 show_registers(regs);
11192 #ifdef CONFIG_X86_32
11193- if (user_mode_vm(regs)) {
11194+ if (user_mode(regs)) {
11195 sp = regs->sp;
11196 ss = regs->ss & 0xffff;
11197 } else {
11198@@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs
11199 unsigned long flags = oops_begin();
11200 int sig = SIGSEGV;
11201
11202- if (!user_mode_vm(regs))
11203+ if (!user_mode(regs))
11204 report_bug(regs->ip, regs);
11205
11206 if (__die(str, regs, err))
11207diff -urNp linux-3.0.4/arch/x86/kernel/early_printk.c linux-3.0.4/arch/x86/kernel/early_printk.c
11208--- linux-3.0.4/arch/x86/kernel/early_printk.c 2011-07-21 22:17:23.000000000 -0400
11209+++ linux-3.0.4/arch/x86/kernel/early_printk.c 2011-08-23 21:48:14.000000000 -0400
11210@@ -7,6 +7,7 @@
11211 #include <linux/pci_regs.h>
11212 #include <linux/pci_ids.h>
11213 #include <linux/errno.h>
11214+#include <linux/sched.h>
11215 #include <asm/io.h>
11216 #include <asm/processor.h>
11217 #include <asm/fcntl.h>
11218@@ -179,6 +180,8 @@ asmlinkage void early_printk(const char
11219 int n;
11220 va_list ap;
11221
11222+ pax_track_stack();
11223+
11224 va_start(ap, fmt);
11225 n = vscnprintf(buf, sizeof(buf), fmt, ap);
11226 early_console->write(early_console, buf, n);
11227diff -urNp linux-3.0.4/arch/x86/kernel/entry_32.S linux-3.0.4/arch/x86/kernel/entry_32.S
11228--- linux-3.0.4/arch/x86/kernel/entry_32.S 2011-07-21 22:17:23.000000000 -0400
11229+++ linux-3.0.4/arch/x86/kernel/entry_32.S 2011-08-30 18:23:52.000000000 -0400
11230@@ -185,13 +185,146 @@
11231 /*CFI_REL_OFFSET gs, PT_GS*/
11232 .endm
11233 .macro SET_KERNEL_GS reg
11234+
11235+#ifdef CONFIG_CC_STACKPROTECTOR
11236 movl $(__KERNEL_STACK_CANARY), \reg
11237+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
11238+ movl $(__USER_DS), \reg
11239+#else
11240+ xorl \reg, \reg
11241+#endif
11242+
11243 movl \reg, %gs
11244 .endm
11245
11246 #endif /* CONFIG_X86_32_LAZY_GS */
11247
11248-.macro SAVE_ALL
11249+.macro pax_enter_kernel
11250+#ifdef CONFIG_PAX_KERNEXEC
11251+ call pax_enter_kernel
11252+#endif
11253+.endm
11254+
11255+.macro pax_exit_kernel
11256+#ifdef CONFIG_PAX_KERNEXEC
11257+ call pax_exit_kernel
11258+#endif
11259+.endm
11260+
11261+#ifdef CONFIG_PAX_KERNEXEC
11262+ENTRY(pax_enter_kernel)
11263+#ifdef CONFIG_PARAVIRT
11264+ pushl %eax
11265+ pushl %ecx
11266+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
11267+ mov %eax, %esi
11268+#else
11269+ mov %cr0, %esi
11270+#endif
11271+ bts $16, %esi
11272+ jnc 1f
11273+ mov %cs, %esi
11274+ cmp $__KERNEL_CS, %esi
11275+ jz 3f
11276+ ljmp $__KERNEL_CS, $3f
11277+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
11278+2:
11279+#ifdef CONFIG_PARAVIRT
11280+ mov %esi, %eax
11281+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11282+#else
11283+ mov %esi, %cr0
11284+#endif
11285+3:
11286+#ifdef CONFIG_PARAVIRT
11287+ popl %ecx
11288+ popl %eax
11289+#endif
11290+ ret
11291+ENDPROC(pax_enter_kernel)
11292+
11293+ENTRY(pax_exit_kernel)
11294+#ifdef CONFIG_PARAVIRT
11295+ pushl %eax
11296+ pushl %ecx
11297+#endif
11298+ mov %cs, %esi
11299+ cmp $__KERNEXEC_KERNEL_CS, %esi
11300+ jnz 2f
11301+#ifdef CONFIG_PARAVIRT
11302+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
11303+ mov %eax, %esi
11304+#else
11305+ mov %cr0, %esi
11306+#endif
11307+ btr $16, %esi
11308+ ljmp $__KERNEL_CS, $1f
11309+1:
11310+#ifdef CONFIG_PARAVIRT
11311+ mov %esi, %eax
11312+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
11313+#else
11314+ mov %esi, %cr0
11315+#endif
11316+2:
11317+#ifdef CONFIG_PARAVIRT
11318+ popl %ecx
11319+ popl %eax
11320+#endif
11321+ ret
11322+ENDPROC(pax_exit_kernel)
11323+#endif
11324+
11325+.macro pax_erase_kstack
11326+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11327+ call pax_erase_kstack
11328+#endif
11329+.endm
11330+
11331+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11332+/*
11333+ * ebp: thread_info
11334+ * ecx, edx: can be clobbered
11335+ */
11336+ENTRY(pax_erase_kstack)
11337+ pushl %edi
11338+ pushl %eax
11339+
11340+ mov TI_lowest_stack(%ebp), %edi
11341+ mov $-0xBEEF, %eax
11342+ std
11343+
11344+1: mov %edi, %ecx
11345+ and $THREAD_SIZE_asm - 1, %ecx
11346+ shr $2, %ecx
11347+ repne scasl
11348+ jecxz 2f
11349+
11350+ cmp $2*16, %ecx
11351+ jc 2f
11352+
11353+ mov $2*16, %ecx
11354+ repe scasl
11355+ jecxz 2f
11356+ jne 1b
11357+
11358+2: cld
11359+ mov %esp, %ecx
11360+ sub %edi, %ecx
11361+ shr $2, %ecx
11362+ rep stosl
11363+
11364+ mov TI_task_thread_sp0(%ebp), %edi
11365+ sub $128, %edi
11366+ mov %edi, TI_lowest_stack(%ebp)
11367+
11368+ popl %eax
11369+ popl %edi
11370+ ret
11371+ENDPROC(pax_erase_kstack)
11372+#endif
11373+
11374+.macro __SAVE_ALL _DS
11375 cld
11376 PUSH_GS
11377 pushl_cfi %fs
11378@@ -214,7 +347,7 @@
11379 CFI_REL_OFFSET ecx, 0
11380 pushl_cfi %ebx
11381 CFI_REL_OFFSET ebx, 0
11382- movl $(__USER_DS), %edx
11383+ movl $\_DS, %edx
11384 movl %edx, %ds
11385 movl %edx, %es
11386 movl $(__KERNEL_PERCPU), %edx
11387@@ -222,6 +355,15 @@
11388 SET_KERNEL_GS %edx
11389 .endm
11390
11391+.macro SAVE_ALL
11392+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
11393+ __SAVE_ALL __KERNEL_DS
11394+ pax_enter_kernel
11395+#else
11396+ __SAVE_ALL __USER_DS
11397+#endif
11398+.endm
11399+
11400 .macro RESTORE_INT_REGS
11401 popl_cfi %ebx
11402 CFI_RESTORE ebx
11403@@ -332,7 +474,15 @@ check_userspace:
11404 movb PT_CS(%esp), %al
11405 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
11406 cmpl $USER_RPL, %eax
11407+
11408+#ifdef CONFIG_PAX_KERNEXEC
11409+ jae resume_userspace
11410+
11411+ PAX_EXIT_KERNEL
11412+ jmp resume_kernel
11413+#else
11414 jb resume_kernel # not returning to v8086 or userspace
11415+#endif
11416
11417 ENTRY(resume_userspace)
11418 LOCKDEP_SYS_EXIT
11419@@ -344,7 +494,7 @@ ENTRY(resume_userspace)
11420 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
11421 # int/exception return?
11422 jne work_pending
11423- jmp restore_all
11424+ jmp restore_all_pax
11425 END(ret_from_exception)
11426
11427 #ifdef CONFIG_PREEMPT
11428@@ -394,23 +544,34 @@ sysenter_past_esp:
11429 /*CFI_REL_OFFSET cs, 0*/
11430 /*
11431 * Push current_thread_info()->sysenter_return to the stack.
11432- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
11433- * pushed above; +8 corresponds to copy_thread's esp0 setting.
11434 */
11435- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
11436+ pushl_cfi $0
11437 CFI_REL_OFFSET eip, 0
11438
11439 pushl_cfi %eax
11440 SAVE_ALL
11441+ GET_THREAD_INFO(%ebp)
11442+ movl TI_sysenter_return(%ebp),%ebp
11443+ movl %ebp,PT_EIP(%esp)
11444 ENABLE_INTERRUPTS(CLBR_NONE)
11445
11446 /*
11447 * Load the potential sixth argument from user stack.
11448 * Careful about security.
11449 */
11450+ movl PT_OLDESP(%esp),%ebp
11451+
11452+#ifdef CONFIG_PAX_MEMORY_UDEREF
11453+ mov PT_OLDSS(%esp),%ds
11454+1: movl %ds:(%ebp),%ebp
11455+ push %ss
11456+ pop %ds
11457+#else
11458 cmpl $__PAGE_OFFSET-3,%ebp
11459 jae syscall_fault
11460 1: movl (%ebp),%ebp
11461+#endif
11462+
11463 movl %ebp,PT_EBP(%esp)
11464 .section __ex_table,"a"
11465 .align 4
11466@@ -433,12 +594,24 @@ sysenter_do_call:
11467 testl $_TIF_ALLWORK_MASK, %ecx
11468 jne sysexit_audit
11469 sysenter_exit:
11470+
11471+#ifdef CONFIG_PAX_RANDKSTACK
11472+ pushl_cfi %eax
11473+ movl %esp, %eax
11474+ call pax_randomize_kstack
11475+ popl_cfi %eax
11476+#endif
11477+
11478+ pax_erase_kstack
11479+
11480 /* if something modifies registers it must also disable sysexit */
11481 movl PT_EIP(%esp), %edx
11482 movl PT_OLDESP(%esp), %ecx
11483 xorl %ebp,%ebp
11484 TRACE_IRQS_ON
11485 1: mov PT_FS(%esp), %fs
11486+2: mov PT_DS(%esp), %ds
11487+3: mov PT_ES(%esp), %es
11488 PTGS_TO_GS
11489 ENABLE_INTERRUPTS_SYSEXIT
11490
11491@@ -455,6 +628,9 @@ sysenter_audit:
11492 movl %eax,%edx /* 2nd arg: syscall number */
11493 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
11494 call audit_syscall_entry
11495+
11496+ pax_erase_kstack
11497+
11498 pushl_cfi %ebx
11499 movl PT_EAX(%esp),%eax /* reload syscall number */
11500 jmp sysenter_do_call
11501@@ -481,11 +657,17 @@ sysexit_audit:
11502
11503 CFI_ENDPROC
11504 .pushsection .fixup,"ax"
11505-2: movl $0,PT_FS(%esp)
11506+4: movl $0,PT_FS(%esp)
11507+ jmp 1b
11508+5: movl $0,PT_DS(%esp)
11509+ jmp 1b
11510+6: movl $0,PT_ES(%esp)
11511 jmp 1b
11512 .section __ex_table,"a"
11513 .align 4
11514- .long 1b,2b
11515+ .long 1b,4b
11516+ .long 2b,5b
11517+ .long 3b,6b
11518 .popsection
11519 PTGS_TO_GS_EX
11520 ENDPROC(ia32_sysenter_target)
11521@@ -518,6 +700,15 @@ syscall_exit:
11522 testl $_TIF_ALLWORK_MASK, %ecx # current->work
11523 jne syscall_exit_work
11524
11525+restore_all_pax:
11526+
11527+#ifdef CONFIG_PAX_RANDKSTACK
11528+ movl %esp, %eax
11529+ call pax_randomize_kstack
11530+#endif
11531+
11532+ pax_erase_kstack
11533+
11534 restore_all:
11535 TRACE_IRQS_IRET
11536 restore_all_notrace:
11537@@ -577,14 +768,34 @@ ldt_ss:
11538 * compensating for the offset by changing to the ESPFIX segment with
11539 * a base address that matches for the difference.
11540 */
11541-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
11542+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
11543 mov %esp, %edx /* load kernel esp */
11544 mov PT_OLDESP(%esp), %eax /* load userspace esp */
11545 mov %dx, %ax /* eax: new kernel esp */
11546 sub %eax, %edx /* offset (low word is 0) */
11547+#ifdef CONFIG_SMP
11548+ movl PER_CPU_VAR(cpu_number), %ebx
11549+ shll $PAGE_SHIFT_asm, %ebx
11550+ addl $cpu_gdt_table, %ebx
11551+#else
11552+ movl $cpu_gdt_table, %ebx
11553+#endif
11554 shr $16, %edx
11555- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
11556- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
11557+
11558+#ifdef CONFIG_PAX_KERNEXEC
11559+ mov %cr0, %esi
11560+ btr $16, %esi
11561+ mov %esi, %cr0
11562+#endif
11563+
11564+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
11565+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
11566+
11567+#ifdef CONFIG_PAX_KERNEXEC
11568+ bts $16, %esi
11569+ mov %esi, %cr0
11570+#endif
11571+
11572 pushl_cfi $__ESPFIX_SS
11573 pushl_cfi %eax /* new kernel esp */
11574 /* Disable interrupts, but do not irqtrace this section: we
11575@@ -613,29 +824,23 @@ work_resched:
11576 movl TI_flags(%ebp), %ecx
11577 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
11578 # than syscall tracing?
11579- jz restore_all
11580+ jz restore_all_pax
11581 testb $_TIF_NEED_RESCHED, %cl
11582 jnz work_resched
11583
11584 work_notifysig: # deal with pending signals and
11585 # notify-resume requests
11586+ movl %esp, %eax
11587 #ifdef CONFIG_VM86
11588 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
11589- movl %esp, %eax
11590- jne work_notifysig_v86 # returning to kernel-space or
11591+ jz 1f # returning to kernel-space or
11592 # vm86-space
11593- xorl %edx, %edx
11594- call do_notify_resume
11595- jmp resume_userspace_sig
11596
11597- ALIGN
11598-work_notifysig_v86:
11599 pushl_cfi %ecx # save ti_flags for do_notify_resume
11600 call save_v86_state # %eax contains pt_regs pointer
11601 popl_cfi %ecx
11602 movl %eax, %esp
11603-#else
11604- movl %esp, %eax
11605+1:
11606 #endif
11607 xorl %edx, %edx
11608 call do_notify_resume
11609@@ -648,6 +853,9 @@ syscall_trace_entry:
11610 movl $-ENOSYS,PT_EAX(%esp)
11611 movl %esp, %eax
11612 call syscall_trace_enter
11613+
11614+ pax_erase_kstack
11615+
11616 /* What it returned is what we'll actually use. */
11617 cmpl $(nr_syscalls), %eax
11618 jnae syscall_call
11619@@ -670,6 +878,10 @@ END(syscall_exit_work)
11620
11621 RING0_INT_FRAME # can't unwind into user space anyway
11622 syscall_fault:
11623+#ifdef CONFIG_PAX_MEMORY_UDEREF
11624+ push %ss
11625+ pop %ds
11626+#endif
11627 GET_THREAD_INFO(%ebp)
11628 movl $-EFAULT,PT_EAX(%esp)
11629 jmp resume_userspace
11630@@ -752,6 +964,36 @@ ptregs_clone:
11631 CFI_ENDPROC
11632 ENDPROC(ptregs_clone)
11633
11634+ ALIGN;
11635+ENTRY(kernel_execve)
11636+ CFI_STARTPROC
11637+ pushl_cfi %ebp
11638+ sub $PT_OLDSS+4,%esp
11639+ pushl_cfi %edi
11640+ pushl_cfi %ecx
11641+ pushl_cfi %eax
11642+ lea 3*4(%esp),%edi
11643+ mov $PT_OLDSS/4+1,%ecx
11644+ xorl %eax,%eax
11645+ rep stosl
11646+ popl_cfi %eax
11647+ popl_cfi %ecx
11648+ popl_cfi %edi
11649+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
11650+ pushl_cfi %esp
11651+ call sys_execve
11652+ add $4,%esp
11653+ CFI_ADJUST_CFA_OFFSET -4
11654+ GET_THREAD_INFO(%ebp)
11655+ test %eax,%eax
11656+ jz syscall_exit
11657+ add $PT_OLDSS+4,%esp
11658+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
11659+ popl_cfi %ebp
11660+ ret
11661+ CFI_ENDPROC
11662+ENDPROC(kernel_execve)
11663+
11664 .macro FIXUP_ESPFIX_STACK
11665 /*
11666 * Switch back for ESPFIX stack to the normal zerobased stack
11667@@ -761,8 +1003,15 @@ ENDPROC(ptregs_clone)
11668 * normal stack and adjusts ESP with the matching offset.
11669 */
11670 /* fixup the stack */
11671- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
11672- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
11673+#ifdef CONFIG_SMP
11674+ movl PER_CPU_VAR(cpu_number), %ebx
11675+ shll $PAGE_SHIFT_asm, %ebx
11676+ addl $cpu_gdt_table, %ebx
11677+#else
11678+ movl $cpu_gdt_table, %ebx
11679+#endif
11680+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
11681+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
11682 shl $16, %eax
11683 addl %esp, %eax /* the adjusted stack pointer */
11684 pushl_cfi $__KERNEL_DS
11685@@ -1213,7 +1462,6 @@ return_to_handler:
11686 jmp *%ecx
11687 #endif
11688
11689-.section .rodata,"a"
11690 #include "syscall_table_32.S"
11691
11692 syscall_table_size=(.-sys_call_table)
11693@@ -1259,9 +1507,12 @@ error_code:
11694 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
11695 REG_TO_PTGS %ecx
11696 SET_KERNEL_GS %ecx
11697- movl $(__USER_DS), %ecx
11698+ movl $(__KERNEL_DS), %ecx
11699 movl %ecx, %ds
11700 movl %ecx, %es
11701+
11702+ pax_enter_kernel
11703+
11704 TRACE_IRQS_OFF
11705 movl %esp,%eax # pt_regs pointer
11706 call *%edi
11707@@ -1346,6 +1597,9 @@ nmi_stack_correct:
11708 xorl %edx,%edx # zero error code
11709 movl %esp,%eax # pt_regs pointer
11710 call do_nmi
11711+
11712+ pax_exit_kernel
11713+
11714 jmp restore_all_notrace
11715 CFI_ENDPROC
11716
11717@@ -1382,6 +1636,9 @@ nmi_espfix_stack:
11718 FIXUP_ESPFIX_STACK # %eax == %esp
11719 xorl %edx,%edx # zero error code
11720 call do_nmi
11721+
11722+ pax_exit_kernel
11723+
11724 RESTORE_REGS
11725 lss 12+4(%esp), %esp # back to espfix stack
11726 CFI_ADJUST_CFA_OFFSET -24
11727diff -urNp linux-3.0.4/arch/x86/kernel/entry_64.S linux-3.0.4/arch/x86/kernel/entry_64.S
11728--- linux-3.0.4/arch/x86/kernel/entry_64.S 2011-07-21 22:17:23.000000000 -0400
11729+++ linux-3.0.4/arch/x86/kernel/entry_64.S 2011-08-26 19:49:56.000000000 -0400
11730@@ -53,6 +53,7 @@
11731 #include <asm/paravirt.h>
11732 #include <asm/ftrace.h>
11733 #include <asm/percpu.h>
11734+#include <asm/pgtable.h>
11735
11736 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11737 #include <linux/elf-em.h>
11738@@ -176,6 +177,264 @@ ENTRY(native_usergs_sysret64)
11739 ENDPROC(native_usergs_sysret64)
11740 #endif /* CONFIG_PARAVIRT */
11741
11742+ .macro ljmpq sel, off
11743+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
11744+ .byte 0x48; ljmp *1234f(%rip)
11745+ .pushsection .rodata
11746+ .align 16
11747+ 1234: .quad \off; .word \sel
11748+ .popsection
11749+#else
11750+ pushq $\sel
11751+ pushq $\off
11752+ lretq
11753+#endif
11754+ .endm
11755+
11756+ .macro pax_enter_kernel
11757+#ifdef CONFIG_PAX_KERNEXEC
11758+ call pax_enter_kernel
11759+#endif
11760+ .endm
11761+
11762+ .macro pax_exit_kernel
11763+#ifdef CONFIG_PAX_KERNEXEC
11764+ call pax_exit_kernel
11765+#endif
11766+ .endm
11767+
11768+#ifdef CONFIG_PAX_KERNEXEC
11769+ENTRY(pax_enter_kernel)
11770+ pushq %rdi
11771+
11772+#ifdef CONFIG_PARAVIRT
11773+ PV_SAVE_REGS(CLBR_RDI)
11774+#endif
11775+
11776+ GET_CR0_INTO_RDI
11777+ bts $16,%rdi
11778+ jnc 1f
11779+ mov %cs,%edi
11780+ cmp $__KERNEL_CS,%edi
11781+ jz 3f
11782+ ljmpq __KERNEL_CS,3f
11783+1: ljmpq __KERNEXEC_KERNEL_CS,2f
11784+2: SET_RDI_INTO_CR0
11785+3:
11786+
11787+#ifdef CONFIG_PARAVIRT
11788+ PV_RESTORE_REGS(CLBR_RDI)
11789+#endif
11790+
11791+ popq %rdi
11792+ retq
11793+ENDPROC(pax_enter_kernel)
11794+
11795+ENTRY(pax_exit_kernel)
11796+ pushq %rdi
11797+
11798+#ifdef CONFIG_PARAVIRT
11799+ PV_SAVE_REGS(CLBR_RDI)
11800+#endif
11801+
11802+ mov %cs,%rdi
11803+ cmp $__KERNEXEC_KERNEL_CS,%edi
11804+ jnz 2f
11805+ GET_CR0_INTO_RDI
11806+ btr $16,%rdi
11807+ ljmpq __KERNEL_CS,1f
11808+1: SET_RDI_INTO_CR0
11809+2:
11810+
11811+#ifdef CONFIG_PARAVIRT
11812+ PV_RESTORE_REGS(CLBR_RDI);
11813+#endif
11814+
11815+ popq %rdi
11816+ retq
11817+ENDPROC(pax_exit_kernel)
11818+#endif
11819+
11820+ .macro pax_enter_kernel_user
11821+#ifdef CONFIG_PAX_MEMORY_UDEREF
11822+ call pax_enter_kernel_user
11823+#endif
11824+ .endm
11825+
11826+ .macro pax_exit_kernel_user
11827+#ifdef CONFIG_PAX_MEMORY_UDEREF
11828+ call pax_exit_kernel_user
11829+#endif
11830+#ifdef CONFIG_PAX_RANDKSTACK
11831+ push %rax
11832+ call pax_randomize_kstack
11833+ pop %rax
11834+#endif
11835+ .endm
11836+
11837+#ifdef CONFIG_PAX_MEMORY_UDEREF
11838+ENTRY(pax_enter_kernel_user)
11839+ pushq %rdi
11840+ pushq %rbx
11841+
11842+#ifdef CONFIG_PARAVIRT
11843+ PV_SAVE_REGS(CLBR_RDI)
11844+#endif
11845+
11846+ GET_CR3_INTO_RDI
11847+ mov %rdi,%rbx
11848+ add $__START_KERNEL_map,%rbx
11849+ sub phys_base(%rip),%rbx
11850+
11851+#ifdef CONFIG_PARAVIRT
11852+ pushq %rdi
11853+ cmpl $0, pv_info+PARAVIRT_enabled
11854+ jz 1f
11855+ i = 0
11856+ .rept USER_PGD_PTRS
11857+ mov i*8(%rbx),%rsi
11858+ mov $0,%sil
11859+ lea i*8(%rbx),%rdi
11860+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
11861+ i = i + 1
11862+ .endr
11863+ jmp 2f
11864+1:
11865+#endif
11866+
11867+ i = 0
11868+ .rept USER_PGD_PTRS
11869+ movb $0,i*8(%rbx)
11870+ i = i + 1
11871+ .endr
11872+
11873+#ifdef CONFIG_PARAVIRT
11874+2: popq %rdi
11875+#endif
11876+ SET_RDI_INTO_CR3
11877+
11878+#ifdef CONFIG_PAX_KERNEXEC
11879+ GET_CR0_INTO_RDI
11880+ bts $16,%rdi
11881+ SET_RDI_INTO_CR0
11882+#endif
11883+
11884+#ifdef CONFIG_PARAVIRT
11885+ PV_RESTORE_REGS(CLBR_RDI)
11886+#endif
11887+
11888+ popq %rbx
11889+ popq %rdi
11890+ retq
11891+ENDPROC(pax_enter_kernel_user)
11892+
11893+ENTRY(pax_exit_kernel_user)
11894+ push %rdi
11895+
11896+#ifdef CONFIG_PARAVIRT
11897+ pushq %rbx
11898+ PV_SAVE_REGS(CLBR_RDI)
11899+#endif
11900+
11901+#ifdef CONFIG_PAX_KERNEXEC
11902+ GET_CR0_INTO_RDI
11903+ btr $16,%rdi
11904+ SET_RDI_INTO_CR0
11905+#endif
11906+
11907+ GET_CR3_INTO_RDI
11908+ add $__START_KERNEL_map,%rdi
11909+ sub phys_base(%rip),%rdi
11910+
11911+#ifdef CONFIG_PARAVIRT
11912+ cmpl $0, pv_info+PARAVIRT_enabled
11913+ jz 1f
11914+ mov %rdi,%rbx
11915+ i = 0
11916+ .rept USER_PGD_PTRS
11917+ mov i*8(%rbx),%rsi
11918+ mov $0x67,%sil
11919+ lea i*8(%rbx),%rdi
11920+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
11921+ i = i + 1
11922+ .endr
11923+ jmp 2f
11924+1:
11925+#endif
11926+
11927+ i = 0
11928+ .rept USER_PGD_PTRS
11929+ movb $0x67,i*8(%rdi)
11930+ i = i + 1
11931+ .endr
11932+
11933+#ifdef CONFIG_PARAVIRT
11934+2: PV_RESTORE_REGS(CLBR_RDI)
11935+ popq %rbx
11936+#endif
11937+
11938+ popq %rdi
11939+ retq
11940+ENDPROC(pax_exit_kernel_user)
11941+#endif
11942+
11943+ .macro pax_erase_kstack
11944+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11945+ call pax_erase_kstack
11946+#endif
11947+ .endm
11948+
11949+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11950+/*
11951+ * r10: thread_info
11952+ * rcx, rdx: can be clobbered
11953+ */
11954+ENTRY(pax_erase_kstack)
11955+ pushq %rdi
11956+ pushq %rax
11957+ pushq %r10
11958+
11959+ GET_THREAD_INFO(%r10)
11960+ mov TI_lowest_stack(%r10), %rdi
11961+ mov $-0xBEEF, %rax
11962+ std
11963+
11964+1: mov %edi, %ecx
11965+ and $THREAD_SIZE_asm - 1, %ecx
11966+ shr $3, %ecx
11967+ repne scasq
11968+ jecxz 2f
11969+
11970+ cmp $2*8, %ecx
11971+ jc 2f
11972+
11973+ mov $2*8, %ecx
11974+ repe scasq
11975+ jecxz 2f
11976+ jne 1b
11977+
11978+2: cld
11979+ mov %esp, %ecx
11980+ sub %edi, %ecx
11981+
11982+ cmp $THREAD_SIZE_asm, %rcx
11983+ jb 3f
11984+ ud2
11985+3:
11986+
11987+ shr $3, %ecx
11988+ rep stosq
11989+
11990+ mov TI_task_thread_sp0(%r10), %rdi
11991+ sub $256, %rdi
11992+ mov %rdi, TI_lowest_stack(%r10)
11993+
11994+ popq %r10
11995+ popq %rax
11996+ popq %rdi
11997+ ret
11998+ENDPROC(pax_erase_kstack)
11999+#endif
12000
12001 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
12002 #ifdef CONFIG_TRACE_IRQFLAGS
12003@@ -318,7 +577,7 @@ ENTRY(save_args)
12004 leaq -RBP+8(%rsp),%rdi /* arg1 for handler */
12005 movq_cfi rbp, 8 /* push %rbp */
12006 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
12007- testl $3, CS(%rdi)
12008+ testb $3, CS(%rdi)
12009 je 1f
12010 SWAPGS
12011 /*
12012@@ -409,7 +668,7 @@ ENTRY(ret_from_fork)
12013
12014 RESTORE_REST
12015
12016- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12017+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12018 je int_ret_from_sys_call
12019
12020 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
12021@@ -455,7 +714,7 @@ END(ret_from_fork)
12022 ENTRY(system_call)
12023 CFI_STARTPROC simple
12024 CFI_SIGNAL_FRAME
12025- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
12026+ CFI_DEF_CFA rsp,0
12027 CFI_REGISTER rip,rcx
12028 /*CFI_REGISTER rflags,r11*/
12029 SWAPGS_UNSAFE_STACK
12030@@ -468,12 +727,13 @@ ENTRY(system_call_after_swapgs)
12031
12032 movq %rsp,PER_CPU_VAR(old_rsp)
12033 movq PER_CPU_VAR(kernel_stack),%rsp
12034+ pax_enter_kernel_user
12035 /*
12036 * No need to follow this irqs off/on section - it's straight
12037 * and short:
12038 */
12039 ENABLE_INTERRUPTS(CLBR_NONE)
12040- SAVE_ARGS 8,1
12041+ SAVE_ARGS 8*6,1
12042 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
12043 movq %rcx,RIP-ARGOFFSET(%rsp)
12044 CFI_REL_OFFSET rip,RIP-ARGOFFSET
12045@@ -502,6 +762,8 @@ sysret_check:
12046 andl %edi,%edx
12047 jnz sysret_careful
12048 CFI_REMEMBER_STATE
12049+ pax_exit_kernel_user
12050+ pax_erase_kstack
12051 /*
12052 * sysretq will re-enable interrupts:
12053 */
12054@@ -560,6 +822,9 @@ auditsys:
12055 movq %rax,%rsi /* 2nd arg: syscall number */
12056 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
12057 call audit_syscall_entry
12058+
12059+ pax_erase_kstack
12060+
12061 LOAD_ARGS 0 /* reload call-clobbered registers */
12062 jmp system_call_fastpath
12063
12064@@ -590,6 +855,9 @@ tracesys:
12065 FIXUP_TOP_OF_STACK %rdi
12066 movq %rsp,%rdi
12067 call syscall_trace_enter
12068+
12069+ pax_erase_kstack
12070+
12071 /*
12072 * Reload arg registers from stack in case ptrace changed them.
12073 * We don't reload %rax because syscall_trace_enter() returned
12074@@ -611,7 +879,7 @@ tracesys:
12075 GLOBAL(int_ret_from_sys_call)
12076 DISABLE_INTERRUPTS(CLBR_NONE)
12077 TRACE_IRQS_OFF
12078- testl $3,CS-ARGOFFSET(%rsp)
12079+ testb $3,CS-ARGOFFSET(%rsp)
12080 je retint_restore_args
12081 movl $_TIF_ALLWORK_MASK,%edi
12082 /* edi: mask to check */
12083@@ -793,6 +1061,16 @@ END(interrupt)
12084 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
12085 call save_args
12086 PARTIAL_FRAME 0
12087+#ifdef CONFIG_PAX_MEMORY_UDEREF
12088+ testb $3, CS(%rdi)
12089+ jnz 1f
12090+ pax_enter_kernel
12091+ jmp 2f
12092+1: pax_enter_kernel_user
12093+2:
12094+#else
12095+ pax_enter_kernel
12096+#endif
12097 call \func
12098 .endm
12099
12100@@ -825,7 +1103,7 @@ ret_from_intr:
12101 CFI_ADJUST_CFA_OFFSET -8
12102 exit_intr:
12103 GET_THREAD_INFO(%rcx)
12104- testl $3,CS-ARGOFFSET(%rsp)
12105+ testb $3,CS-ARGOFFSET(%rsp)
12106 je retint_kernel
12107
12108 /* Interrupt came from user space */
12109@@ -847,12 +1125,15 @@ retint_swapgs: /* return to user-space
12110 * The iretq could re-enable interrupts:
12111 */
12112 DISABLE_INTERRUPTS(CLBR_ANY)
12113+ pax_exit_kernel_user
12114+ pax_erase_kstack
12115 TRACE_IRQS_IRETQ
12116 SWAPGS
12117 jmp restore_args
12118
12119 retint_restore_args: /* return to kernel space */
12120 DISABLE_INTERRUPTS(CLBR_ANY)
12121+ pax_exit_kernel
12122 /*
12123 * The iretq could re-enable interrupts:
12124 */
12125@@ -1027,6 +1308,16 @@ ENTRY(\sym)
12126 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12127 call error_entry
12128 DEFAULT_FRAME 0
12129+#ifdef CONFIG_PAX_MEMORY_UDEREF
12130+ testb $3, CS(%rsp)
12131+ jnz 1f
12132+ pax_enter_kernel
12133+ jmp 2f
12134+1: pax_enter_kernel_user
12135+2:
12136+#else
12137+ pax_enter_kernel
12138+#endif
12139 movq %rsp,%rdi /* pt_regs pointer */
12140 xorl %esi,%esi /* no error code */
12141 call \do_sym
12142@@ -1044,6 +1335,16 @@ ENTRY(\sym)
12143 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12144 call save_paranoid
12145 TRACE_IRQS_OFF
12146+#ifdef CONFIG_PAX_MEMORY_UDEREF
12147+ testb $3, CS(%rsp)
12148+ jnz 1f
12149+ pax_enter_kernel
12150+ jmp 2f
12151+1: pax_enter_kernel_user
12152+2:
12153+#else
12154+ pax_enter_kernel
12155+#endif
12156 movq %rsp,%rdi /* pt_regs pointer */
12157 xorl %esi,%esi /* no error code */
12158 call \do_sym
12159@@ -1052,7 +1353,7 @@ ENTRY(\sym)
12160 END(\sym)
12161 .endm
12162
12163-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
12164+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
12165 .macro paranoidzeroentry_ist sym do_sym ist
12166 ENTRY(\sym)
12167 INTR_FRAME
12168@@ -1062,8 +1363,24 @@ ENTRY(\sym)
12169 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12170 call save_paranoid
12171 TRACE_IRQS_OFF
12172+#ifdef CONFIG_PAX_MEMORY_UDEREF
12173+ testb $3, CS(%rsp)
12174+ jnz 1f
12175+ pax_enter_kernel
12176+ jmp 2f
12177+1: pax_enter_kernel_user
12178+2:
12179+#else
12180+ pax_enter_kernel
12181+#endif
12182 movq %rsp,%rdi /* pt_regs pointer */
12183 xorl %esi,%esi /* no error code */
12184+#ifdef CONFIG_SMP
12185+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
12186+ lea init_tss(%r12), %r12
12187+#else
12188+ lea init_tss(%rip), %r12
12189+#endif
12190 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12191 call \do_sym
12192 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12193@@ -1080,6 +1397,16 @@ ENTRY(\sym)
12194 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12195 call error_entry
12196 DEFAULT_FRAME 0
12197+#ifdef CONFIG_PAX_MEMORY_UDEREF
12198+ testb $3, CS(%rsp)
12199+ jnz 1f
12200+ pax_enter_kernel
12201+ jmp 2f
12202+1: pax_enter_kernel_user
12203+2:
12204+#else
12205+ pax_enter_kernel
12206+#endif
12207 movq %rsp,%rdi /* pt_regs pointer */
12208 movq ORIG_RAX(%rsp),%rsi /* get error code */
12209 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12210@@ -1099,6 +1426,16 @@ ENTRY(\sym)
12211 call save_paranoid
12212 DEFAULT_FRAME 0
12213 TRACE_IRQS_OFF
12214+#ifdef CONFIG_PAX_MEMORY_UDEREF
12215+ testb $3, CS(%rsp)
12216+ jnz 1f
12217+ pax_enter_kernel
12218+ jmp 2f
12219+1: pax_enter_kernel_user
12220+2:
12221+#else
12222+ pax_enter_kernel
12223+#endif
12224 movq %rsp,%rdi /* pt_regs pointer */
12225 movq ORIG_RAX(%rsp),%rsi /* get error code */
12226 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12227@@ -1361,14 +1698,27 @@ ENTRY(paranoid_exit)
12228 TRACE_IRQS_OFF
12229 testl %ebx,%ebx /* swapgs needed? */
12230 jnz paranoid_restore
12231- testl $3,CS(%rsp)
12232+ testb $3,CS(%rsp)
12233 jnz paranoid_userspace
12234+#ifdef CONFIG_PAX_MEMORY_UDEREF
12235+ pax_exit_kernel
12236+ TRACE_IRQS_IRETQ 0
12237+ SWAPGS_UNSAFE_STACK
12238+ RESTORE_ALL 8
12239+ jmp irq_return
12240+#endif
12241 paranoid_swapgs:
12242+#ifdef CONFIG_PAX_MEMORY_UDEREF
12243+ pax_exit_kernel_user
12244+#else
12245+ pax_exit_kernel
12246+#endif
12247 TRACE_IRQS_IRETQ 0
12248 SWAPGS_UNSAFE_STACK
12249 RESTORE_ALL 8
12250 jmp irq_return
12251 paranoid_restore:
12252+ pax_exit_kernel
12253 TRACE_IRQS_IRETQ 0
12254 RESTORE_ALL 8
12255 jmp irq_return
12256@@ -1426,7 +1776,7 @@ ENTRY(error_entry)
12257 movq_cfi r14, R14+8
12258 movq_cfi r15, R15+8
12259 xorl %ebx,%ebx
12260- testl $3,CS+8(%rsp)
12261+ testb $3,CS+8(%rsp)
12262 je error_kernelspace
12263 error_swapgs:
12264 SWAPGS
12265@@ -1490,6 +1840,16 @@ ENTRY(nmi)
12266 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12267 call save_paranoid
12268 DEFAULT_FRAME 0
12269+#ifdef CONFIG_PAX_MEMORY_UDEREF
12270+ testb $3, CS(%rsp)
12271+ jnz 1f
12272+ pax_enter_kernel
12273+ jmp 2f
12274+1: pax_enter_kernel_user
12275+2:
12276+#else
12277+ pax_enter_kernel
12278+#endif
12279 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
12280 movq %rsp,%rdi
12281 movq $-1,%rsi
12282@@ -1500,11 +1860,25 @@ ENTRY(nmi)
12283 DISABLE_INTERRUPTS(CLBR_NONE)
12284 testl %ebx,%ebx /* swapgs needed? */
12285 jnz nmi_restore
12286- testl $3,CS(%rsp)
12287+ testb $3,CS(%rsp)
12288 jnz nmi_userspace
12289+#ifdef CONFIG_PAX_MEMORY_UDEREF
12290+ pax_exit_kernel
12291+ SWAPGS_UNSAFE_STACK
12292+ RESTORE_ALL 8
12293+ jmp irq_return
12294+#endif
12295 nmi_swapgs:
12296+#ifdef CONFIG_PAX_MEMORY_UDEREF
12297+ pax_exit_kernel_user
12298+#else
12299+ pax_exit_kernel
12300+#endif
12301 SWAPGS_UNSAFE_STACK
12302+ RESTORE_ALL 8
12303+ jmp irq_return
12304 nmi_restore:
12305+ pax_exit_kernel
12306 RESTORE_ALL 8
12307 jmp irq_return
12308 nmi_userspace:
12309diff -urNp linux-3.0.4/arch/x86/kernel/ftrace.c linux-3.0.4/arch/x86/kernel/ftrace.c
12310--- linux-3.0.4/arch/x86/kernel/ftrace.c 2011-07-21 22:17:23.000000000 -0400
12311+++ linux-3.0.4/arch/x86/kernel/ftrace.c 2011-08-23 21:47:55.000000000 -0400
12312@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the
12313 static const void *mod_code_newcode; /* holds the text to write to the IP */
12314
12315 static unsigned nmi_wait_count;
12316-static atomic_t nmi_update_count = ATOMIC_INIT(0);
12317+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
12318
12319 int ftrace_arch_read_dyn_info(char *buf, int size)
12320 {
12321@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf,
12322
12323 r = snprintf(buf, size, "%u %u",
12324 nmi_wait_count,
12325- atomic_read(&nmi_update_count));
12326+ atomic_read_unchecked(&nmi_update_count));
12327 return r;
12328 }
12329
12330@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
12331
12332 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
12333 smp_rmb();
12334+ pax_open_kernel();
12335 ftrace_mod_code();
12336- atomic_inc(&nmi_update_count);
12337+ pax_close_kernel();
12338+ atomic_inc_unchecked(&nmi_update_count);
12339 }
12340 /* Must have previous changes seen before executions */
12341 smp_mb();
12342@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, uns
12343 {
12344 unsigned char replaced[MCOUNT_INSN_SIZE];
12345
12346+ ip = ktla_ktva(ip);
12347+
12348 /*
12349 * Note: Due to modules and __init, code can
12350 * disappear and change, we need to protect against faulting
12351@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_fun
12352 unsigned char old[MCOUNT_INSN_SIZE], *new;
12353 int ret;
12354
12355- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
12356+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
12357 new = ftrace_call_replace(ip, (unsigned long)func);
12358 ret = ftrace_modify_code(ip, old, new);
12359
12360@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long
12361 {
12362 unsigned char code[MCOUNT_INSN_SIZE];
12363
12364+ ip = ktla_ktva(ip);
12365+
12366 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
12367 return -EFAULT;
12368
12369diff -urNp linux-3.0.4/arch/x86/kernel/head32.c linux-3.0.4/arch/x86/kernel/head32.c
12370--- linux-3.0.4/arch/x86/kernel/head32.c 2011-07-21 22:17:23.000000000 -0400
12371+++ linux-3.0.4/arch/x86/kernel/head32.c 2011-08-23 21:47:55.000000000 -0400
12372@@ -19,6 +19,7 @@
12373 #include <asm/io_apic.h>
12374 #include <asm/bios_ebda.h>
12375 #include <asm/tlbflush.h>
12376+#include <asm/boot.h>
12377
12378 static void __init i386_default_early_setup(void)
12379 {
12380@@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
12381 {
12382 memblock_init();
12383
12384- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12385+ memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12386
12387 #ifdef CONFIG_BLK_DEV_INITRD
12388 /* Reserve INITRD */
12389diff -urNp linux-3.0.4/arch/x86/kernel/head_32.S linux-3.0.4/arch/x86/kernel/head_32.S
12390--- linux-3.0.4/arch/x86/kernel/head_32.S 2011-07-21 22:17:23.000000000 -0400
12391+++ linux-3.0.4/arch/x86/kernel/head_32.S 2011-08-23 21:47:55.000000000 -0400
12392@@ -25,6 +25,12 @@
12393 /* Physical address */
12394 #define pa(X) ((X) - __PAGE_OFFSET)
12395
12396+#ifdef CONFIG_PAX_KERNEXEC
12397+#define ta(X) (X)
12398+#else
12399+#define ta(X) ((X) - __PAGE_OFFSET)
12400+#endif
12401+
12402 /*
12403 * References to members of the new_cpu_data structure.
12404 */
12405@@ -54,11 +60,7 @@
12406 * and small than max_low_pfn, otherwise will waste some page table entries
12407 */
12408
12409-#if PTRS_PER_PMD > 1
12410-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
12411-#else
12412-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
12413-#endif
12414+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
12415
12416 /* Number of possible pages in the lowmem region */
12417 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
12418@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
12419 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12420
12421 /*
12422+ * Real beginning of normal "text" segment
12423+ */
12424+ENTRY(stext)
12425+ENTRY(_stext)
12426+
12427+/*
12428 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
12429 * %esi points to the real-mode code as a 32-bit pointer.
12430 * CS and DS must be 4 GB flat segments, but we don't depend on
12431@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12432 * can.
12433 */
12434 __HEAD
12435+
12436+#ifdef CONFIG_PAX_KERNEXEC
12437+ jmp startup_32
12438+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
12439+.fill PAGE_SIZE-5,1,0xcc
12440+#endif
12441+
12442 ENTRY(startup_32)
12443 movl pa(stack_start),%ecx
12444
12445@@ -105,6 +120,57 @@ ENTRY(startup_32)
12446 2:
12447 leal -__PAGE_OFFSET(%ecx),%esp
12448
12449+#ifdef CONFIG_SMP
12450+ movl $pa(cpu_gdt_table),%edi
12451+ movl $__per_cpu_load,%eax
12452+ movw %ax,__KERNEL_PERCPU + 2(%edi)
12453+ rorl $16,%eax
12454+ movb %al,__KERNEL_PERCPU + 4(%edi)
12455+ movb %ah,__KERNEL_PERCPU + 7(%edi)
12456+ movl $__per_cpu_end - 1,%eax
12457+ subl $__per_cpu_start,%eax
12458+ movw %ax,__KERNEL_PERCPU + 0(%edi)
12459+#endif
12460+
12461+#ifdef CONFIG_PAX_MEMORY_UDEREF
12462+ movl $NR_CPUS,%ecx
12463+ movl $pa(cpu_gdt_table),%edi
12464+1:
12465+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
12466+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
12467+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
12468+ addl $PAGE_SIZE_asm,%edi
12469+ loop 1b
12470+#endif
12471+
12472+#ifdef CONFIG_PAX_KERNEXEC
12473+ movl $pa(boot_gdt),%edi
12474+ movl $__LOAD_PHYSICAL_ADDR,%eax
12475+ movw %ax,__BOOT_CS + 2(%edi)
12476+ rorl $16,%eax
12477+ movb %al,__BOOT_CS + 4(%edi)
12478+ movb %ah,__BOOT_CS + 7(%edi)
12479+ rorl $16,%eax
12480+
12481+ ljmp $(__BOOT_CS),$1f
12482+1:
12483+
12484+ movl $NR_CPUS,%ecx
12485+ movl $pa(cpu_gdt_table),%edi
12486+ addl $__PAGE_OFFSET,%eax
12487+1:
12488+ movw %ax,__KERNEL_CS + 2(%edi)
12489+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
12490+ rorl $16,%eax
12491+ movb %al,__KERNEL_CS + 4(%edi)
12492+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
12493+ movb %ah,__KERNEL_CS + 7(%edi)
12494+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
12495+ rorl $16,%eax
12496+ addl $PAGE_SIZE_asm,%edi
12497+ loop 1b
12498+#endif
12499+
12500 /*
12501 * Clear BSS first so that there are no surprises...
12502 */
12503@@ -195,8 +261,11 @@ ENTRY(startup_32)
12504 movl %eax, pa(max_pfn_mapped)
12505
12506 /* Do early initialization of the fixmap area */
12507- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12508- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
12509+#ifdef CONFIG_COMPAT_VDSO
12510+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
12511+#else
12512+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
12513+#endif
12514 #else /* Not PAE */
12515
12516 page_pde_offset = (__PAGE_OFFSET >> 20);
12517@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12518 movl %eax, pa(max_pfn_mapped)
12519
12520 /* Do early initialization of the fixmap area */
12521- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12522- movl %eax,pa(initial_page_table+0xffc)
12523+#ifdef CONFIG_COMPAT_VDSO
12524+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
12525+#else
12526+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
12527+#endif
12528 #endif
12529
12530 #ifdef CONFIG_PARAVIRT
12531@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12532 cmpl $num_subarch_entries, %eax
12533 jae bad_subarch
12534
12535- movl pa(subarch_entries)(,%eax,4), %eax
12536- subl $__PAGE_OFFSET, %eax
12537- jmp *%eax
12538+ jmp *pa(subarch_entries)(,%eax,4)
12539
12540 bad_subarch:
12541 WEAK(lguest_entry)
12542@@ -255,10 +325,10 @@ WEAK(xen_entry)
12543 __INITDATA
12544
12545 subarch_entries:
12546- .long default_entry /* normal x86/PC */
12547- .long lguest_entry /* lguest hypervisor */
12548- .long xen_entry /* Xen hypervisor */
12549- .long default_entry /* Moorestown MID */
12550+ .long ta(default_entry) /* normal x86/PC */
12551+ .long ta(lguest_entry) /* lguest hypervisor */
12552+ .long ta(xen_entry) /* Xen hypervisor */
12553+ .long ta(default_entry) /* Moorestown MID */
12554 num_subarch_entries = (. - subarch_entries) / 4
12555 .previous
12556 #else
12557@@ -312,6 +382,7 @@ default_entry:
12558 orl %edx,%eax
12559 movl %eax,%cr4
12560
12561+#ifdef CONFIG_X86_PAE
12562 testb $X86_CR4_PAE, %al # check if PAE is enabled
12563 jz 6f
12564
12565@@ -340,6 +411,9 @@ default_entry:
12566 /* Make changes effective */
12567 wrmsr
12568
12569+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
12570+#endif
12571+
12572 6:
12573
12574 /*
12575@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
12576 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
12577 movl %eax,%ss # after changing gdt.
12578
12579- movl $(__USER_DS),%eax # DS/ES contains default USER segment
12580+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
12581 movl %eax,%ds
12582 movl %eax,%es
12583
12584@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
12585 */
12586 cmpb $0,ready
12587 jne 1f
12588- movl $gdt_page,%eax
12589+ movl $cpu_gdt_table,%eax
12590 movl $stack_canary,%ecx
12591+#ifdef CONFIG_SMP
12592+ addl $__per_cpu_load,%ecx
12593+#endif
12594 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
12595 shrl $16, %ecx
12596 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
12597 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
12598 1:
12599-#endif
12600 movl $(__KERNEL_STACK_CANARY),%eax
12601+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12602+ movl $(__USER_DS),%eax
12603+#else
12604+ xorl %eax,%eax
12605+#endif
12606 movl %eax,%gs
12607
12608 xorl %eax,%eax # Clear LDT
12609@@ -558,22 +639,22 @@ early_page_fault:
12610 jmp early_fault
12611
12612 early_fault:
12613- cld
12614 #ifdef CONFIG_PRINTK
12615+ cmpl $1,%ss:early_recursion_flag
12616+ je hlt_loop
12617+ incl %ss:early_recursion_flag
12618+ cld
12619 pusha
12620 movl $(__KERNEL_DS),%eax
12621 movl %eax,%ds
12622 movl %eax,%es
12623- cmpl $2,early_recursion_flag
12624- je hlt_loop
12625- incl early_recursion_flag
12626 movl %cr2,%eax
12627 pushl %eax
12628 pushl %edx /* trapno */
12629 pushl $fault_msg
12630 call printk
12631+; call dump_stack
12632 #endif
12633- call dump_stack
12634 hlt_loop:
12635 hlt
12636 jmp hlt_loop
12637@@ -581,8 +662,11 @@ hlt_loop:
12638 /* This is the default interrupt "handler" :-) */
12639 ALIGN
12640 ignore_int:
12641- cld
12642 #ifdef CONFIG_PRINTK
12643+ cmpl $2,%ss:early_recursion_flag
12644+ je hlt_loop
12645+ incl %ss:early_recursion_flag
12646+ cld
12647 pushl %eax
12648 pushl %ecx
12649 pushl %edx
12650@@ -591,9 +675,6 @@ ignore_int:
12651 movl $(__KERNEL_DS),%eax
12652 movl %eax,%ds
12653 movl %eax,%es
12654- cmpl $2,early_recursion_flag
12655- je hlt_loop
12656- incl early_recursion_flag
12657 pushl 16(%esp)
12658 pushl 24(%esp)
12659 pushl 32(%esp)
12660@@ -622,29 +703,43 @@ ENTRY(initial_code)
12661 /*
12662 * BSS section
12663 */
12664-__PAGE_ALIGNED_BSS
12665- .align PAGE_SIZE
12666 #ifdef CONFIG_X86_PAE
12667+.section .initial_pg_pmd,"a",@progbits
12668 initial_pg_pmd:
12669 .fill 1024*KPMDS,4,0
12670 #else
12671+.section .initial_page_table,"a",@progbits
12672 ENTRY(initial_page_table)
12673 .fill 1024,4,0
12674 #endif
12675+.section .initial_pg_fixmap,"a",@progbits
12676 initial_pg_fixmap:
12677 .fill 1024,4,0
12678+.section .empty_zero_page,"a",@progbits
12679 ENTRY(empty_zero_page)
12680 .fill 4096,1,0
12681+.section .swapper_pg_dir,"a",@progbits
12682 ENTRY(swapper_pg_dir)
12683+#ifdef CONFIG_X86_PAE
12684+ .fill 4,8,0
12685+#else
12686 .fill 1024,4,0
12687+#endif
12688+
12689+/*
12690+ * The IDT has to be page-aligned to simplify the Pentium
12691+ * F0 0F bug workaround.. We have a special link segment
12692+ * for this.
12693+ */
12694+.section .idt,"a",@progbits
12695+ENTRY(idt_table)
12696+ .fill 256,8,0
12697
12698 /*
12699 * This starts the data section.
12700 */
12701 #ifdef CONFIG_X86_PAE
12702-__PAGE_ALIGNED_DATA
12703- /* Page-aligned for the benefit of paravirt? */
12704- .align PAGE_SIZE
12705+.section .initial_page_table,"a",@progbits
12706 ENTRY(initial_page_table)
12707 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
12708 # if KPMDS == 3
12709@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
12710 # error "Kernel PMDs should be 1, 2 or 3"
12711 # endif
12712 .align PAGE_SIZE /* needs to be page-sized too */
12713+
12714+#ifdef CONFIG_PAX_PER_CPU_PGD
12715+ENTRY(cpu_pgd)
12716+ .rept NR_CPUS
12717+ .fill 4,8,0
12718+ .endr
12719+#endif
12720+
12721 #endif
12722
12723 .data
12724 .balign 4
12725 ENTRY(stack_start)
12726- .long init_thread_union+THREAD_SIZE
12727+ .long init_thread_union+THREAD_SIZE-8
12728+
12729+ready: .byte 0
12730
12731+.section .rodata,"a",@progbits
12732 early_recursion_flag:
12733 .long 0
12734
12735-ready: .byte 0
12736-
12737 int_msg:
12738 .asciz "Unknown interrupt or fault at: %p %p %p\n"
12739
12740@@ -707,7 +811,7 @@ fault_msg:
12741 .word 0 # 32 bit align gdt_desc.address
12742 boot_gdt_descr:
12743 .word __BOOT_DS+7
12744- .long boot_gdt - __PAGE_OFFSET
12745+ .long pa(boot_gdt)
12746
12747 .word 0 # 32-bit align idt_desc.address
12748 idt_descr:
12749@@ -718,7 +822,7 @@ idt_descr:
12750 .word 0 # 32 bit align gdt_desc.address
12751 ENTRY(early_gdt_descr)
12752 .word GDT_ENTRIES*8-1
12753- .long gdt_page /* Overwritten for secondary CPUs */
12754+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
12755
12756 /*
12757 * The boot_gdt must mirror the equivalent in setup.S and is
12758@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
12759 .align L1_CACHE_BYTES
12760 ENTRY(boot_gdt)
12761 .fill GDT_ENTRY_BOOT_CS,8,0
12762- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
12763- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
12764+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
12765+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
12766+
12767+ .align PAGE_SIZE_asm
12768+ENTRY(cpu_gdt_table)
12769+ .rept NR_CPUS
12770+ .quad 0x0000000000000000 /* NULL descriptor */
12771+ .quad 0x0000000000000000 /* 0x0b reserved */
12772+ .quad 0x0000000000000000 /* 0x13 reserved */
12773+ .quad 0x0000000000000000 /* 0x1b reserved */
12774+
12775+#ifdef CONFIG_PAX_KERNEXEC
12776+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
12777+#else
12778+ .quad 0x0000000000000000 /* 0x20 unused */
12779+#endif
12780+
12781+ .quad 0x0000000000000000 /* 0x28 unused */
12782+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
12783+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
12784+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
12785+ .quad 0x0000000000000000 /* 0x4b reserved */
12786+ .quad 0x0000000000000000 /* 0x53 reserved */
12787+ .quad 0x0000000000000000 /* 0x5b reserved */
12788+
12789+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
12790+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
12791+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
12792+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
12793+
12794+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
12795+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
12796+
12797+ /*
12798+ * Segments used for calling PnP BIOS have byte granularity.
12799+ * The code segments and data segments have fixed 64k limits,
12800+ * the transfer segment sizes are set at run time.
12801+ */
12802+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
12803+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
12804+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
12805+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
12806+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
12807+
12808+ /*
12809+ * The APM segments have byte granularity and their bases
12810+ * are set at run time. All have 64k limits.
12811+ */
12812+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
12813+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
12814+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
12815+
12816+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
12817+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
12818+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
12819+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
12820+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
12821+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
12822+
12823+ /* Be sure this is zeroed to avoid false validations in Xen */
12824+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
12825+ .endr
12826diff -urNp linux-3.0.4/arch/x86/kernel/head_64.S linux-3.0.4/arch/x86/kernel/head_64.S
12827--- linux-3.0.4/arch/x86/kernel/head_64.S 2011-07-21 22:17:23.000000000 -0400
12828+++ linux-3.0.4/arch/x86/kernel/head_64.S 2011-08-23 21:47:55.000000000 -0400
12829@@ -19,6 +19,7 @@
12830 #include <asm/cache.h>
12831 #include <asm/processor-flags.h>
12832 #include <asm/percpu.h>
12833+#include <asm/cpufeature.h>
12834
12835 #ifdef CONFIG_PARAVIRT
12836 #include <asm/asm-offsets.h>
12837@@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
12838 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
12839 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
12840 L3_START_KERNEL = pud_index(__START_KERNEL_map)
12841+L4_VMALLOC_START = pgd_index(VMALLOC_START)
12842+L3_VMALLOC_START = pud_index(VMALLOC_START)
12843+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
12844+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
12845
12846 .text
12847 __HEAD
12848@@ -85,35 +90,22 @@ startup_64:
12849 */
12850 addq %rbp, init_level4_pgt + 0(%rip)
12851 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
12852+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
12853+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
12854 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
12855
12856 addq %rbp, level3_ident_pgt + 0(%rip)
12857+#ifndef CONFIG_XEN
12858+ addq %rbp, level3_ident_pgt + 8(%rip)
12859+#endif
12860
12861- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
12862- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
12863+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
12864
12865- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12866+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
12867+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
12868
12869- /* Add an Identity mapping if I am above 1G */
12870- leaq _text(%rip), %rdi
12871- andq $PMD_PAGE_MASK, %rdi
12872-
12873- movq %rdi, %rax
12874- shrq $PUD_SHIFT, %rax
12875- andq $(PTRS_PER_PUD - 1), %rax
12876- jz ident_complete
12877-
12878- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
12879- leaq level3_ident_pgt(%rip), %rbx
12880- movq %rdx, 0(%rbx, %rax, 8)
12881-
12882- movq %rdi, %rax
12883- shrq $PMD_SHIFT, %rax
12884- andq $(PTRS_PER_PMD - 1), %rax
12885- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
12886- leaq level2_spare_pgt(%rip), %rbx
12887- movq %rdx, 0(%rbx, %rax, 8)
12888-ident_complete:
12889+ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12890+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
12891
12892 /*
12893 * Fixup the kernel text+data virtual addresses. Note that
12894@@ -160,8 +152,8 @@ ENTRY(secondary_startup_64)
12895 * after the boot processor executes this code.
12896 */
12897
12898- /* Enable PAE mode and PGE */
12899- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
12900+ /* Enable PAE mode and PSE/PGE */
12901+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
12902 movq %rax, %cr4
12903
12904 /* Setup early boot stage 4 level pagetables. */
12905@@ -183,9 +175,14 @@ ENTRY(secondary_startup_64)
12906 movl $MSR_EFER, %ecx
12907 rdmsr
12908 btsl $_EFER_SCE, %eax /* Enable System Call */
12909- btl $20,%edi /* No Execute supported? */
12910+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
12911 jnc 1f
12912 btsl $_EFER_NX, %eax
12913+ leaq init_level4_pgt(%rip), %rdi
12914+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
12915+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
12916+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
12917+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
12918 1: wrmsr /* Make changes effective */
12919
12920 /* Setup cr0 */
12921@@ -269,7 +266,7 @@ ENTRY(secondary_startup_64)
12922 bad_address:
12923 jmp bad_address
12924
12925- .section ".init.text","ax"
12926+ __INIT
12927 #ifdef CONFIG_EARLY_PRINTK
12928 .globl early_idt_handlers
12929 early_idt_handlers:
12930@@ -314,18 +311,23 @@ ENTRY(early_idt_handler)
12931 #endif /* EARLY_PRINTK */
12932 1: hlt
12933 jmp 1b
12934+ .previous
12935
12936 #ifdef CONFIG_EARLY_PRINTK
12937+ __INITDATA
12938 early_recursion_flag:
12939 .long 0
12940+ .previous
12941
12942+ .section .rodata,"a",@progbits
12943 early_idt_msg:
12944 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
12945 early_idt_ripmsg:
12946 .asciz "RIP %s\n"
12947-#endif /* CONFIG_EARLY_PRINTK */
12948 .previous
12949+#endif /* CONFIG_EARLY_PRINTK */
12950
12951+ .section .rodata,"a",@progbits
12952 #define NEXT_PAGE(name) \
12953 .balign PAGE_SIZE; \
12954 ENTRY(name)
12955@@ -338,7 +340,6 @@ ENTRY(name)
12956 i = i + 1 ; \
12957 .endr
12958
12959- .data
12960 /*
12961 * This default setting generates an ident mapping at address 0x100000
12962 * and a mapping for the kernel that precisely maps virtual address
12963@@ -349,13 +350,36 @@ NEXT_PAGE(init_level4_pgt)
12964 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12965 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
12966 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12967+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
12968+ .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
12969+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
12970+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
12971 .org init_level4_pgt + L4_START_KERNEL*8, 0
12972 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
12973 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
12974
12975+#ifdef CONFIG_PAX_PER_CPU_PGD
12976+NEXT_PAGE(cpu_pgd)
12977+ .rept NR_CPUS
12978+ .fill 512,8,0
12979+ .endr
12980+#endif
12981+
12982 NEXT_PAGE(level3_ident_pgt)
12983 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12984+#ifdef CONFIG_XEN
12985 .fill 511,8,0
12986+#else
12987+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
12988+ .fill 510,8,0
12989+#endif
12990+
12991+NEXT_PAGE(level3_vmalloc_pgt)
12992+ .fill 512,8,0
12993+
12994+NEXT_PAGE(level3_vmemmap_pgt)
12995+ .fill L3_VMEMMAP_START,8,0
12996+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
12997
12998 NEXT_PAGE(level3_kernel_pgt)
12999 .fill L3_START_KERNEL,8,0
13000@@ -363,20 +387,23 @@ NEXT_PAGE(level3_kernel_pgt)
13001 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
13002 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13003
13004+NEXT_PAGE(level2_vmemmap_pgt)
13005+ .fill 512,8,0
13006+
13007 NEXT_PAGE(level2_fixmap_pgt)
13008- .fill 506,8,0
13009- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13010- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
13011- .fill 5,8,0
13012+ .fill 507,8,0
13013+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
13014+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
13015+ .fill 4,8,0
13016
13017-NEXT_PAGE(level1_fixmap_pgt)
13018+NEXT_PAGE(level1_vsyscall_pgt)
13019 .fill 512,8,0
13020
13021-NEXT_PAGE(level2_ident_pgt)
13022- /* Since I easily can, map the first 1G.
13023+ /* Since I easily can, map the first 2G.
13024 * Don't set NX because code runs from these pages.
13025 */
13026- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
13027+NEXT_PAGE(level2_ident_pgt)
13028+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
13029
13030 NEXT_PAGE(level2_kernel_pgt)
13031 /*
13032@@ -389,33 +416,55 @@ NEXT_PAGE(level2_kernel_pgt)
13033 * If you want to increase this then increase MODULES_VADDR
13034 * too.)
13035 */
13036- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
13037- KERNEL_IMAGE_SIZE/PMD_SIZE)
13038-
13039-NEXT_PAGE(level2_spare_pgt)
13040- .fill 512, 8, 0
13041+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
13042
13043 #undef PMDS
13044 #undef NEXT_PAGE
13045
13046- .data
13047+ .align PAGE_SIZE
13048+ENTRY(cpu_gdt_table)
13049+ .rept NR_CPUS
13050+ .quad 0x0000000000000000 /* NULL descriptor */
13051+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
13052+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
13053+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
13054+ .quad 0x00cffb000000ffff /* __USER32_CS */
13055+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
13056+ .quad 0x00affb000000ffff /* __USER_CS */
13057+
13058+#ifdef CONFIG_PAX_KERNEXEC
13059+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
13060+#else
13061+ .quad 0x0 /* unused */
13062+#endif
13063+
13064+ .quad 0,0 /* TSS */
13065+ .quad 0,0 /* LDT */
13066+ .quad 0,0,0 /* three TLS descriptors */
13067+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
13068+ /* asm/segment.h:GDT_ENTRIES must match this */
13069+
13070+ /* zero the remaining page */
13071+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
13072+ .endr
13073+
13074 .align 16
13075 .globl early_gdt_descr
13076 early_gdt_descr:
13077 .word GDT_ENTRIES*8-1
13078 early_gdt_descr_base:
13079- .quad INIT_PER_CPU_VAR(gdt_page)
13080+ .quad cpu_gdt_table
13081
13082 ENTRY(phys_base)
13083 /* This must match the first entry in level2_kernel_pgt */
13084 .quad 0x0000000000000000
13085
13086 #include "../../x86/xen/xen-head.S"
13087-
13088- .section .bss, "aw", @nobits
13089+
13090+ .section .rodata,"a",@progbits
13091 .align L1_CACHE_BYTES
13092 ENTRY(idt_table)
13093- .skip IDT_ENTRIES * 16
13094+ .fill 512,8,0
13095
13096 __PAGE_ALIGNED_BSS
13097 .align PAGE_SIZE
13098diff -urNp linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c
13099--- linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c 2011-07-21 22:17:23.000000000 -0400
13100+++ linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c 2011-08-23 21:47:55.000000000 -0400
13101@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
13102 EXPORT_SYMBOL(cmpxchg8b_emu);
13103 #endif
13104
13105+EXPORT_SYMBOL_GPL(cpu_gdt_table);
13106+
13107 /* Networking helper routines. */
13108 EXPORT_SYMBOL(csum_partial_copy_generic);
13109+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
13110+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
13111
13112 EXPORT_SYMBOL(__get_user_1);
13113 EXPORT_SYMBOL(__get_user_2);
13114@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
13115
13116 EXPORT_SYMBOL(csum_partial);
13117 EXPORT_SYMBOL(empty_zero_page);
13118+
13119+#ifdef CONFIG_PAX_KERNEXEC
13120+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
13121+#endif
13122diff -urNp linux-3.0.4/arch/x86/kernel/i8259.c linux-3.0.4/arch/x86/kernel/i8259.c
13123--- linux-3.0.4/arch/x86/kernel/i8259.c 2011-07-21 22:17:23.000000000 -0400
13124+++ linux-3.0.4/arch/x86/kernel/i8259.c 2011-08-23 21:47:55.000000000 -0400
13125@@ -210,7 +210,7 @@ spurious_8259A_irq:
13126 "spurious 8259A interrupt: IRQ%d.\n", irq);
13127 spurious_irq_mask |= irqmask;
13128 }
13129- atomic_inc(&irq_err_count);
13130+ atomic_inc_unchecked(&irq_err_count);
13131 /*
13132 * Theoretically we do not have to handle this IRQ,
13133 * but in Linux this does not cause problems and is
13134diff -urNp linux-3.0.4/arch/x86/kernel/init_task.c linux-3.0.4/arch/x86/kernel/init_task.c
13135--- linux-3.0.4/arch/x86/kernel/init_task.c 2011-07-21 22:17:23.000000000 -0400
13136+++ linux-3.0.4/arch/x86/kernel/init_task.c 2011-08-23 21:47:55.000000000 -0400
13137@@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
13138 * way process stacks are handled. This is done by having a special
13139 * "init_task" linker map entry..
13140 */
13141-union thread_union init_thread_union __init_task_data =
13142- { INIT_THREAD_INFO(init_task) };
13143+union thread_union init_thread_union __init_task_data;
13144
13145 /*
13146 * Initial task structure.
13147@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
13148 * section. Since TSS's are completely CPU-local, we want them
13149 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
13150 */
13151-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
13152-
13153+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
13154+EXPORT_SYMBOL(init_tss);
13155diff -urNp linux-3.0.4/arch/x86/kernel/ioport.c linux-3.0.4/arch/x86/kernel/ioport.c
13156--- linux-3.0.4/arch/x86/kernel/ioport.c 2011-07-21 22:17:23.000000000 -0400
13157+++ linux-3.0.4/arch/x86/kernel/ioport.c 2011-08-23 21:48:14.000000000 -0400
13158@@ -6,6 +6,7 @@
13159 #include <linux/sched.h>
13160 #include <linux/kernel.h>
13161 #include <linux/capability.h>
13162+#include <linux/security.h>
13163 #include <linux/errno.h>
13164 #include <linux/types.h>
13165 #include <linux/ioport.h>
13166@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long
13167
13168 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
13169 return -EINVAL;
13170+#ifdef CONFIG_GRKERNSEC_IO
13171+ if (turn_on && grsec_disable_privio) {
13172+ gr_handle_ioperm();
13173+ return -EPERM;
13174+ }
13175+#endif
13176 if (turn_on && !capable(CAP_SYS_RAWIO))
13177 return -EPERM;
13178
13179@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long
13180 * because the ->io_bitmap_max value must match the bitmap
13181 * contents:
13182 */
13183- tss = &per_cpu(init_tss, get_cpu());
13184+ tss = init_tss + get_cpu();
13185
13186 if (turn_on)
13187 bitmap_clear(t->io_bitmap_ptr, from, num);
13188@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct
13189 return -EINVAL;
13190 /* Trying to gain more privileges? */
13191 if (level > old) {
13192+#ifdef CONFIG_GRKERNSEC_IO
13193+ if (grsec_disable_privio) {
13194+ gr_handle_iopl();
13195+ return -EPERM;
13196+ }
13197+#endif
13198 if (!capable(CAP_SYS_RAWIO))
13199 return -EPERM;
13200 }
13201diff -urNp linux-3.0.4/arch/x86/kernel/irq_32.c linux-3.0.4/arch/x86/kernel/irq_32.c
13202--- linux-3.0.4/arch/x86/kernel/irq_32.c 2011-07-21 22:17:23.000000000 -0400
13203+++ linux-3.0.4/arch/x86/kernel/irq_32.c 2011-08-23 21:47:55.000000000 -0400
13204@@ -36,7 +36,7 @@ static int check_stack_overflow(void)
13205 __asm__ __volatile__("andl %%esp,%0" :
13206 "=r" (sp) : "0" (THREAD_SIZE - 1));
13207
13208- return sp < (sizeof(struct thread_info) + STACK_WARN);
13209+ return sp < STACK_WARN;
13210 }
13211
13212 static void print_stack_overflow(void)
13213@@ -54,8 +54,8 @@ static inline void print_stack_overflow(
13214 * per-CPU IRQ handling contexts (thread information and stack)
13215 */
13216 union irq_ctx {
13217- struct thread_info tinfo;
13218- u32 stack[THREAD_SIZE/sizeof(u32)];
13219+ unsigned long previous_esp;
13220+ u32 stack[THREAD_SIZE/sizeof(u32)];
13221 } __attribute__((aligned(THREAD_SIZE)));
13222
13223 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
13224@@ -75,10 +75,9 @@ static void call_on_stack(void *func, vo
13225 static inline int
13226 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
13227 {
13228- union irq_ctx *curctx, *irqctx;
13229+ union irq_ctx *irqctx;
13230 u32 *isp, arg1, arg2;
13231
13232- curctx = (union irq_ctx *) current_thread_info();
13233 irqctx = __this_cpu_read(hardirq_ctx);
13234
13235 /*
13236@@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struc
13237 * handler) we can't do that and just have to keep using the
13238 * current stack (which is the irq stack already after all)
13239 */
13240- if (unlikely(curctx == irqctx))
13241+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
13242 return 0;
13243
13244 /* build the stack frame on the IRQ stack */
13245- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13246- irqctx->tinfo.task = curctx->tinfo.task;
13247- irqctx->tinfo.previous_esp = current_stack_pointer;
13248+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13249+ irqctx->previous_esp = current_stack_pointer;
13250
13251- /*
13252- * Copy the softirq bits in preempt_count so that the
13253- * softirq checks work in the hardirq context.
13254- */
13255- irqctx->tinfo.preempt_count =
13256- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
13257- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
13258+#ifdef CONFIG_PAX_MEMORY_UDEREF
13259+ __set_fs(MAKE_MM_SEG(0));
13260+#endif
13261
13262 if (unlikely(overflow))
13263 call_on_stack(print_stack_overflow, isp);
13264@@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struc
13265 : "0" (irq), "1" (desc), "2" (isp),
13266 "D" (desc->handle_irq)
13267 : "memory", "cc", "ecx");
13268+
13269+#ifdef CONFIG_PAX_MEMORY_UDEREF
13270+ __set_fs(current_thread_info()->addr_limit);
13271+#endif
13272+
13273 return 1;
13274 }
13275
13276@@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struc
13277 */
13278 void __cpuinit irq_ctx_init(int cpu)
13279 {
13280- union irq_ctx *irqctx;
13281-
13282 if (per_cpu(hardirq_ctx, cpu))
13283 return;
13284
13285- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13286- THREAD_FLAGS,
13287- THREAD_ORDER));
13288- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13289- irqctx->tinfo.cpu = cpu;
13290- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
13291- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13292-
13293- per_cpu(hardirq_ctx, cpu) = irqctx;
13294-
13295- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13296- THREAD_FLAGS,
13297- THREAD_ORDER));
13298- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13299- irqctx->tinfo.cpu = cpu;
13300- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13301-
13302- per_cpu(softirq_ctx, cpu) = irqctx;
13303+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13304+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13305
13306 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
13307 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
13308@@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
13309 asmlinkage void do_softirq(void)
13310 {
13311 unsigned long flags;
13312- struct thread_info *curctx;
13313 union irq_ctx *irqctx;
13314 u32 *isp;
13315
13316@@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
13317 local_irq_save(flags);
13318
13319 if (local_softirq_pending()) {
13320- curctx = current_thread_info();
13321 irqctx = __this_cpu_read(softirq_ctx);
13322- irqctx->tinfo.task = curctx->task;
13323- irqctx->tinfo.previous_esp = current_stack_pointer;
13324+ irqctx->previous_esp = current_stack_pointer;
13325
13326 /* build the stack frame on the softirq stack */
13327- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13328+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13329+
13330+#ifdef CONFIG_PAX_MEMORY_UDEREF
13331+ __set_fs(MAKE_MM_SEG(0));
13332+#endif
13333
13334 call_on_stack(__do_softirq, isp);
13335+
13336+#ifdef CONFIG_PAX_MEMORY_UDEREF
13337+ __set_fs(current_thread_info()->addr_limit);
13338+#endif
13339+
13340 /*
13341 * Shouldn't happen, we returned above if in_interrupt():
13342 */
13343diff -urNp linux-3.0.4/arch/x86/kernel/irq.c linux-3.0.4/arch/x86/kernel/irq.c
13344--- linux-3.0.4/arch/x86/kernel/irq.c 2011-07-21 22:17:23.000000000 -0400
13345+++ linux-3.0.4/arch/x86/kernel/irq.c 2011-08-23 21:47:55.000000000 -0400
13346@@ -17,7 +17,7 @@
13347 #include <asm/mce.h>
13348 #include <asm/hw_irq.h>
13349
13350-atomic_t irq_err_count;
13351+atomic_unchecked_t irq_err_count;
13352
13353 /* Function pointer for generic interrupt vector handling */
13354 void (*x86_platform_ipi_callback)(void) = NULL;
13355@@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file
13356 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
13357 seq_printf(p, " Machine check polls\n");
13358 #endif
13359- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
13360+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
13361 #if defined(CONFIG_X86_IO_APIC)
13362- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
13363+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
13364 #endif
13365 return 0;
13366 }
13367@@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
13368
13369 u64 arch_irq_stat(void)
13370 {
13371- u64 sum = atomic_read(&irq_err_count);
13372+ u64 sum = atomic_read_unchecked(&irq_err_count);
13373
13374 #ifdef CONFIG_X86_IO_APIC
13375- sum += atomic_read(&irq_mis_count);
13376+ sum += atomic_read_unchecked(&irq_mis_count);
13377 #endif
13378 return sum;
13379 }
13380diff -urNp linux-3.0.4/arch/x86/kernel/kgdb.c linux-3.0.4/arch/x86/kernel/kgdb.c
13381--- linux-3.0.4/arch/x86/kernel/kgdb.c 2011-07-21 22:17:23.000000000 -0400
13382+++ linux-3.0.4/arch/x86/kernel/kgdb.c 2011-08-23 21:47:55.000000000 -0400
13383@@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem,
13384 #ifdef CONFIG_X86_32
13385 switch (regno) {
13386 case GDB_SS:
13387- if (!user_mode_vm(regs))
13388+ if (!user_mode(regs))
13389 *(unsigned long *)mem = __KERNEL_DS;
13390 break;
13391 case GDB_SP:
13392- if (!user_mode_vm(regs))
13393+ if (!user_mode(regs))
13394 *(unsigned long *)mem = kernel_stack_pointer(regs);
13395 break;
13396 case GDB_GS:
13397@@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vec
13398 case 'k':
13399 /* clear the trace bit */
13400 linux_regs->flags &= ~X86_EFLAGS_TF;
13401- atomic_set(&kgdb_cpu_doing_single_step, -1);
13402+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
13403
13404 /* set the trace bit if we're stepping */
13405 if (remcomInBuffer[0] == 's') {
13406 linux_regs->flags |= X86_EFLAGS_TF;
13407- atomic_set(&kgdb_cpu_doing_single_step,
13408+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
13409 raw_smp_processor_id());
13410 }
13411
13412@@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args
13413 return NOTIFY_DONE;
13414
13415 case DIE_DEBUG:
13416- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
13417+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
13418 if (user_mode(regs))
13419 return single_step_cont(regs, args);
13420 break;
13421diff -urNp linux-3.0.4/arch/x86/kernel/kprobes.c linux-3.0.4/arch/x86/kernel/kprobes.c
13422--- linux-3.0.4/arch/x86/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
13423+++ linux-3.0.4/arch/x86/kernel/kprobes.c 2011-08-23 21:47:55.000000000 -0400
13424@@ -115,8 +115,11 @@ static void __kprobes __synthesize_relat
13425 } __attribute__((packed)) *insn;
13426
13427 insn = (struct __arch_relative_insn *)from;
13428+
13429+ pax_open_kernel();
13430 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
13431 insn->op = op;
13432+ pax_close_kernel();
13433 }
13434
13435 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
13436@@ -153,7 +156,7 @@ static int __kprobes can_boost(kprobe_op
13437 kprobe_opcode_t opcode;
13438 kprobe_opcode_t *orig_opcodes = opcodes;
13439
13440- if (search_exception_tables((unsigned long)opcodes))
13441+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
13442 return 0; /* Page fault may occur on this address. */
13443
13444 retry:
13445@@ -314,7 +317,9 @@ static int __kprobes __copy_instruction(
13446 }
13447 }
13448 insn_get_length(&insn);
13449+ pax_open_kernel();
13450 memcpy(dest, insn.kaddr, insn.length);
13451+ pax_close_kernel();
13452
13453 #ifdef CONFIG_X86_64
13454 if (insn_rip_relative(&insn)) {
13455@@ -338,7 +343,9 @@ static int __kprobes __copy_instruction(
13456 (u8 *) dest;
13457 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
13458 disp = (u8 *) dest + insn_offset_displacement(&insn);
13459+ pax_open_kernel();
13460 *(s32 *) disp = (s32) newdisp;
13461+ pax_close_kernel();
13462 }
13463 #endif
13464 return insn.length;
13465@@ -352,12 +359,12 @@ static void __kprobes arch_copy_kprobe(s
13466 */
13467 __copy_instruction(p->ainsn.insn, p->addr, 0);
13468
13469- if (can_boost(p->addr))
13470+ if (can_boost(ktla_ktva(p->addr)))
13471 p->ainsn.boostable = 0;
13472 else
13473 p->ainsn.boostable = -1;
13474
13475- p->opcode = *p->addr;
13476+ p->opcode = *(ktla_ktva(p->addr));
13477 }
13478
13479 int __kprobes arch_prepare_kprobe(struct kprobe *p)
13480@@ -474,7 +481,7 @@ static void __kprobes setup_singlestep(s
13481 * nor set current_kprobe, because it doesn't use single
13482 * stepping.
13483 */
13484- regs->ip = (unsigned long)p->ainsn.insn;
13485+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13486 preempt_enable_no_resched();
13487 return;
13488 }
13489@@ -493,7 +500,7 @@ static void __kprobes setup_singlestep(s
13490 if (p->opcode == BREAKPOINT_INSTRUCTION)
13491 regs->ip = (unsigned long)p->addr;
13492 else
13493- regs->ip = (unsigned long)p->ainsn.insn;
13494+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13495 }
13496
13497 /*
13498@@ -572,7 +579,7 @@ static int __kprobes kprobe_handler(stru
13499 setup_singlestep(p, regs, kcb, 0);
13500 return 1;
13501 }
13502- } else if (*addr != BREAKPOINT_INSTRUCTION) {
13503+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
13504 /*
13505 * The breakpoint instruction was removed right
13506 * after we hit it. Another cpu has removed
13507@@ -817,7 +824,7 @@ static void __kprobes resume_execution(s
13508 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
13509 {
13510 unsigned long *tos = stack_addr(regs);
13511- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
13512+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
13513 unsigned long orig_ip = (unsigned long)p->addr;
13514 kprobe_opcode_t *insn = p->ainsn.insn;
13515
13516@@ -999,7 +1006,7 @@ int __kprobes kprobe_exceptions_notify(s
13517 struct die_args *args = data;
13518 int ret = NOTIFY_DONE;
13519
13520- if (args->regs && user_mode_vm(args->regs))
13521+ if (args->regs && user_mode(args->regs))
13522 return ret;
13523
13524 switch (val) {
13525@@ -1381,7 +1388,7 @@ int __kprobes arch_prepare_optimized_kpr
13526 * Verify if the address gap is in 2GB range, because this uses
13527 * a relative jump.
13528 */
13529- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
13530+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
13531 if (abs(rel) > 0x7fffffff)
13532 return -ERANGE;
13533
13534@@ -1402,11 +1409,11 @@ int __kprobes arch_prepare_optimized_kpr
13535 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
13536
13537 /* Set probe function call */
13538- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
13539+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
13540
13541 /* Set returning jmp instruction at the tail of out-of-line buffer */
13542 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
13543- (u8 *)op->kp.addr + op->optinsn.size);
13544+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
13545
13546 flush_icache_range((unsigned long) buf,
13547 (unsigned long) buf + TMPL_END_IDX +
13548@@ -1428,7 +1435,7 @@ static void __kprobes setup_optimize_kpr
13549 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
13550
13551 /* Backup instructions which will be replaced by jump address */
13552- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
13553+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
13554 RELATIVE_ADDR_SIZE);
13555
13556 insn_buf[0] = RELATIVEJUMP_OPCODE;
13557diff -urNp linux-3.0.4/arch/x86/kernel/kvm.c linux-3.0.4/arch/x86/kernel/kvm.c
13558--- linux-3.0.4/arch/x86/kernel/kvm.c 2011-07-21 22:17:23.000000000 -0400
13559+++ linux-3.0.4/arch/x86/kernel/kvm.c 2011-08-24 18:10:12.000000000 -0400
13560@@ -426,6 +426,7 @@ static void __init paravirt_ops_setup(vo
13561 pv_mmu_ops.set_pud = kvm_set_pud;
13562 #if PAGETABLE_LEVELS == 4
13563 pv_mmu_ops.set_pgd = kvm_set_pgd;
13564+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
13565 #endif
13566 #endif
13567 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
13568diff -urNp linux-3.0.4/arch/x86/kernel/ldt.c linux-3.0.4/arch/x86/kernel/ldt.c
13569--- linux-3.0.4/arch/x86/kernel/ldt.c 2011-07-21 22:17:23.000000000 -0400
13570+++ linux-3.0.4/arch/x86/kernel/ldt.c 2011-08-23 21:47:55.000000000 -0400
13571@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
13572 if (reload) {
13573 #ifdef CONFIG_SMP
13574 preempt_disable();
13575- load_LDT(pc);
13576+ load_LDT_nolock(pc);
13577 if (!cpumask_equal(mm_cpumask(current->mm),
13578 cpumask_of(smp_processor_id())))
13579 smp_call_function(flush_ldt, current->mm, 1);
13580 preempt_enable();
13581 #else
13582- load_LDT(pc);
13583+ load_LDT_nolock(pc);
13584 #endif
13585 }
13586 if (oldsize) {
13587@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t
13588 return err;
13589
13590 for (i = 0; i < old->size; i++)
13591- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
13592+ write_ldt_entry(new->ldt, i, old->ldt + i);
13593 return 0;
13594 }
13595
13596@@ -116,6 +116,24 @@ int init_new_context(struct task_struct
13597 retval = copy_ldt(&mm->context, &old_mm->context);
13598 mutex_unlock(&old_mm->context.lock);
13599 }
13600+
13601+ if (tsk == current) {
13602+ mm->context.vdso = 0;
13603+
13604+#ifdef CONFIG_X86_32
13605+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
13606+ mm->context.user_cs_base = 0UL;
13607+ mm->context.user_cs_limit = ~0UL;
13608+
13609+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13610+ cpus_clear(mm->context.cpu_user_cs_mask);
13611+#endif
13612+
13613+#endif
13614+#endif
13615+
13616+ }
13617+
13618 return retval;
13619 }
13620
13621@@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
13622 }
13623 }
13624
13625+#ifdef CONFIG_PAX_SEGMEXEC
13626+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
13627+ error = -EINVAL;
13628+ goto out_unlock;
13629+ }
13630+#endif
13631+
13632 fill_ldt(&ldt, &ldt_info);
13633 if (oldmode)
13634 ldt.avl = 0;
13635diff -urNp linux-3.0.4/arch/x86/kernel/machine_kexec_32.c linux-3.0.4/arch/x86/kernel/machine_kexec_32.c
13636--- linux-3.0.4/arch/x86/kernel/machine_kexec_32.c 2011-07-21 22:17:23.000000000 -0400
13637+++ linux-3.0.4/arch/x86/kernel/machine_kexec_32.c 2011-08-23 21:47:55.000000000 -0400
13638@@ -27,7 +27,7 @@
13639 #include <asm/cacheflush.h>
13640 #include <asm/debugreg.h>
13641
13642-static void set_idt(void *newidt, __u16 limit)
13643+static void set_idt(struct desc_struct *newidt, __u16 limit)
13644 {
13645 struct desc_ptr curidt;
13646
13647@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16
13648 }
13649
13650
13651-static void set_gdt(void *newgdt, __u16 limit)
13652+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
13653 {
13654 struct desc_ptr curgdt;
13655
13656@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
13657 }
13658
13659 control_page = page_address(image->control_code_page);
13660- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
13661+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
13662
13663 relocate_kernel_ptr = control_page;
13664 page_list[PA_CONTROL_PAGE] = __pa(control_page);
13665diff -urNp linux-3.0.4/arch/x86/kernel/microcode_intel.c linux-3.0.4/arch/x86/kernel/microcode_intel.c
13666--- linux-3.0.4/arch/x86/kernel/microcode_intel.c 2011-07-21 22:17:23.000000000 -0400
13667+++ linux-3.0.4/arch/x86/kernel/microcode_intel.c 2011-08-23 21:47:55.000000000 -0400
13668@@ -440,13 +440,13 @@ static enum ucode_state request_microcod
13669
13670 static int get_ucode_user(void *to, const void *from, size_t n)
13671 {
13672- return copy_from_user(to, from, n);
13673+ return copy_from_user(to, (__force const void __user *)from, n);
13674 }
13675
13676 static enum ucode_state
13677 request_microcode_user(int cpu, const void __user *buf, size_t size)
13678 {
13679- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
13680+ return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
13681 }
13682
13683 static void microcode_fini_cpu(int cpu)
13684diff -urNp linux-3.0.4/arch/x86/kernel/module.c linux-3.0.4/arch/x86/kernel/module.c
13685--- linux-3.0.4/arch/x86/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
13686+++ linux-3.0.4/arch/x86/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
13687@@ -36,21 +36,66 @@
13688 #define DEBUGP(fmt...)
13689 #endif
13690
13691-void *module_alloc(unsigned long size)
13692+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
13693 {
13694 if (PAGE_ALIGN(size) > MODULES_LEN)
13695 return NULL;
13696 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
13697- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
13698+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
13699 -1, __builtin_return_address(0));
13700 }
13701
13702+void *module_alloc(unsigned long size)
13703+{
13704+
13705+#ifdef CONFIG_PAX_KERNEXEC
13706+ return __module_alloc(size, PAGE_KERNEL);
13707+#else
13708+ return __module_alloc(size, PAGE_KERNEL_EXEC);
13709+#endif
13710+
13711+}
13712+
13713 /* Free memory returned from module_alloc */
13714 void module_free(struct module *mod, void *module_region)
13715 {
13716 vfree(module_region);
13717 }
13718
13719+#ifdef CONFIG_PAX_KERNEXEC
13720+#ifdef CONFIG_X86_32
13721+void *module_alloc_exec(unsigned long size)
13722+{
13723+ struct vm_struct *area;
13724+
13725+ if (size == 0)
13726+ return NULL;
13727+
13728+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
13729+ return area ? area->addr : NULL;
13730+}
13731+EXPORT_SYMBOL(module_alloc_exec);
13732+
13733+void module_free_exec(struct module *mod, void *module_region)
13734+{
13735+ vunmap(module_region);
13736+}
13737+EXPORT_SYMBOL(module_free_exec);
13738+#else
13739+void module_free_exec(struct module *mod, void *module_region)
13740+{
13741+ module_free(mod, module_region);
13742+}
13743+EXPORT_SYMBOL(module_free_exec);
13744+
13745+void *module_alloc_exec(unsigned long size)
13746+{
13747+ return __module_alloc(size, PAGE_KERNEL_RX);
13748+}
13749+EXPORT_SYMBOL(module_alloc_exec);
13750+#endif
13751+#endif
13752+
13753 /* We don't need anything special. */
13754 int module_frob_arch_sections(Elf_Ehdr *hdr,
13755 Elf_Shdr *sechdrs,
13756@@ -70,14 +115,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13757 unsigned int i;
13758 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
13759 Elf32_Sym *sym;
13760- uint32_t *location;
13761+ uint32_t *plocation, location;
13762
13763 DEBUGP("Applying relocate section %u to %u\n", relsec,
13764 sechdrs[relsec].sh_info);
13765 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
13766 /* This is where to make the change */
13767- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
13768- + rel[i].r_offset;
13769+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
13770+ location = (uint32_t)plocation;
13771+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
13772+ plocation = ktla_ktva((void *)plocation);
13773 /* This is the symbol it is referring to. Note that all
13774 undefined symbols have been resolved. */
13775 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
13776@@ -86,11 +133,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13777 switch (ELF32_R_TYPE(rel[i].r_info)) {
13778 case R_386_32:
13779 /* We add the value into the location given */
13780- *location += sym->st_value;
13781+ pax_open_kernel();
13782+ *plocation += sym->st_value;
13783+ pax_close_kernel();
13784 break;
13785 case R_386_PC32:
13786 /* Add the value, subtract its postition */
13787- *location += sym->st_value - (uint32_t)location;
13788+ pax_open_kernel();
13789+ *plocation += sym->st_value - location;
13790+ pax_close_kernel();
13791 break;
13792 default:
13793 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
13794@@ -146,21 +197,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
13795 case R_X86_64_NONE:
13796 break;
13797 case R_X86_64_64:
13798+ pax_open_kernel();
13799 *(u64 *)loc = val;
13800+ pax_close_kernel();
13801 break;
13802 case R_X86_64_32:
13803+ pax_open_kernel();
13804 *(u32 *)loc = val;
13805+ pax_close_kernel();
13806 if (val != *(u32 *)loc)
13807 goto overflow;
13808 break;
13809 case R_X86_64_32S:
13810+ pax_open_kernel();
13811 *(s32 *)loc = val;
13812+ pax_close_kernel();
13813 if ((s64)val != *(s32 *)loc)
13814 goto overflow;
13815 break;
13816 case R_X86_64_PC32:
13817 val -= (u64)loc;
13818+ pax_open_kernel();
13819 *(u32 *)loc = val;
13820+ pax_close_kernel();
13821+
13822 #if 0
13823 if ((s64)val != *(s32 *)loc)
13824 goto overflow;
13825diff -urNp linux-3.0.4/arch/x86/kernel/paravirt.c linux-3.0.4/arch/x86/kernel/paravirt.c
13826--- linux-3.0.4/arch/x86/kernel/paravirt.c 2011-07-21 22:17:23.000000000 -0400
13827+++ linux-3.0.4/arch/x86/kernel/paravirt.c 2011-08-23 21:48:14.000000000 -0400
13828@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
13829 {
13830 return x;
13831 }
13832+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13833+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
13834+#endif
13835
13836 void __init default_banner(void)
13837 {
13838@@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
13839 * corresponding structure. */
13840 static void *get_call_destination(u8 type)
13841 {
13842- struct paravirt_patch_template tmpl = {
13843+ const struct paravirt_patch_template tmpl = {
13844 .pv_init_ops = pv_init_ops,
13845 .pv_time_ops = pv_time_ops,
13846 .pv_cpu_ops = pv_cpu_ops,
13847@@ -133,6 +136,9 @@ static void *get_call_destination(u8 typ
13848 .pv_lock_ops = pv_lock_ops,
13849 #endif
13850 };
13851+
13852+ pax_track_stack();
13853+
13854 return *((void **)&tmpl + type);
13855 }
13856
13857@@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type,
13858 if (opfunc == NULL)
13859 /* If there's no function, patch it with a ud2a (BUG) */
13860 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
13861- else if (opfunc == _paravirt_nop)
13862+ else if (opfunc == (void *)_paravirt_nop)
13863 /* If the operation is a nop, then nop the callsite */
13864 ret = paravirt_patch_nop();
13865
13866 /* identity functions just return their single argument */
13867- else if (opfunc == _paravirt_ident_32)
13868+ else if (opfunc == (void *)_paravirt_ident_32)
13869 ret = paravirt_patch_ident_32(insnbuf, len);
13870- else if (opfunc == _paravirt_ident_64)
13871+ else if (opfunc == (void *)_paravirt_ident_64)
13872 ret = paravirt_patch_ident_64(insnbuf, len);
13873+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13874+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
13875+ ret = paravirt_patch_ident_64(insnbuf, len);
13876+#endif
13877
13878 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
13879 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
13880@@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insn
13881 if (insn_len > len || start == NULL)
13882 insn_len = len;
13883 else
13884- memcpy(insnbuf, start, insn_len);
13885+ memcpy(insnbuf, ktla_ktva(start), insn_len);
13886
13887 return insn_len;
13888 }
13889@@ -294,22 +304,22 @@ void arch_flush_lazy_mmu_mode(void)
13890 preempt_enable();
13891 }
13892
13893-struct pv_info pv_info = {
13894+struct pv_info pv_info __read_only = {
13895 .name = "bare hardware",
13896 .paravirt_enabled = 0,
13897 .kernel_rpl = 0,
13898 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
13899 };
13900
13901-struct pv_init_ops pv_init_ops = {
13902+struct pv_init_ops pv_init_ops __read_only = {
13903 .patch = native_patch,
13904 };
13905
13906-struct pv_time_ops pv_time_ops = {
13907+struct pv_time_ops pv_time_ops __read_only = {
13908 .sched_clock = native_sched_clock,
13909 };
13910
13911-struct pv_irq_ops pv_irq_ops = {
13912+struct pv_irq_ops pv_irq_ops __read_only = {
13913 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
13914 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
13915 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
13916@@ -321,7 +331,7 @@ struct pv_irq_ops pv_irq_ops = {
13917 #endif
13918 };
13919
13920-struct pv_cpu_ops pv_cpu_ops = {
13921+struct pv_cpu_ops pv_cpu_ops __read_only = {
13922 .cpuid = native_cpuid,
13923 .get_debugreg = native_get_debugreg,
13924 .set_debugreg = native_set_debugreg,
13925@@ -382,21 +392,26 @@ struct pv_cpu_ops pv_cpu_ops = {
13926 .end_context_switch = paravirt_nop,
13927 };
13928
13929-struct pv_apic_ops pv_apic_ops = {
13930+struct pv_apic_ops pv_apic_ops __read_only = {
13931 #ifdef CONFIG_X86_LOCAL_APIC
13932 .startup_ipi_hook = paravirt_nop,
13933 #endif
13934 };
13935
13936-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
13937+#ifdef CONFIG_X86_32
13938+#ifdef CONFIG_X86_PAE
13939+/* 64-bit pagetable entries */
13940+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
13941+#else
13942 /* 32-bit pagetable entries */
13943 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
13944+#endif
13945 #else
13946 /* 64-bit pagetable entries */
13947 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
13948 #endif
13949
13950-struct pv_mmu_ops pv_mmu_ops = {
13951+struct pv_mmu_ops pv_mmu_ops __read_only = {
13952
13953 .read_cr2 = native_read_cr2,
13954 .write_cr2 = native_write_cr2,
13955@@ -446,6 +461,7 @@ struct pv_mmu_ops pv_mmu_ops = {
13956 .make_pud = PTE_IDENT,
13957
13958 .set_pgd = native_set_pgd,
13959+ .set_pgd_batched = native_set_pgd_batched,
13960 #endif
13961 #endif /* PAGETABLE_LEVELS >= 3 */
13962
13963@@ -465,6 +481,12 @@ struct pv_mmu_ops pv_mmu_ops = {
13964 },
13965
13966 .set_fixmap = native_set_fixmap,
13967+
13968+#ifdef CONFIG_PAX_KERNEXEC
13969+ .pax_open_kernel = native_pax_open_kernel,
13970+ .pax_close_kernel = native_pax_close_kernel,
13971+#endif
13972+
13973 };
13974
13975 EXPORT_SYMBOL_GPL(pv_time_ops);
13976diff -urNp linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c
13977--- linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c 2011-07-21 22:17:23.000000000 -0400
13978+++ linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c 2011-08-23 21:47:55.000000000 -0400
13979@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
13980 arch_spin_lock(lock);
13981 }
13982
13983-struct pv_lock_ops pv_lock_ops = {
13984+struct pv_lock_ops pv_lock_ops __read_only = {
13985 #ifdef CONFIG_SMP
13986 .spin_is_locked = __ticket_spin_is_locked,
13987 .spin_is_contended = __ticket_spin_is_contended,
13988diff -urNp linux-3.0.4/arch/x86/kernel/pci-iommu_table.c linux-3.0.4/arch/x86/kernel/pci-iommu_table.c
13989--- linux-3.0.4/arch/x86/kernel/pci-iommu_table.c 2011-07-21 22:17:23.000000000 -0400
13990+++ linux-3.0.4/arch/x86/kernel/pci-iommu_table.c 2011-08-23 21:48:14.000000000 -0400
13991@@ -2,7 +2,7 @@
13992 #include <asm/iommu_table.h>
13993 #include <linux/string.h>
13994 #include <linux/kallsyms.h>
13995-
13996+#include <linux/sched.h>
13997
13998 #define DEBUG 1
13999
14000@@ -51,6 +51,8 @@ void __init check_iommu_entries(struct i
14001 {
14002 struct iommu_table_entry *p, *q, *x;
14003
14004+ pax_track_stack();
14005+
14006 /* Simple cyclic dependency checker. */
14007 for (p = start; p < finish; p++) {
14008 q = find_dependents_of(start, finish, p);
14009diff -urNp linux-3.0.4/arch/x86/kernel/process_32.c linux-3.0.4/arch/x86/kernel/process_32.c
14010--- linux-3.0.4/arch/x86/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
14011+++ linux-3.0.4/arch/x86/kernel/process_32.c 2011-08-23 21:47:55.000000000 -0400
14012@@ -65,6 +65,7 @@ asmlinkage void ret_from_fork(void) __as
14013 unsigned long thread_saved_pc(struct task_struct *tsk)
14014 {
14015 return ((unsigned long *)tsk->thread.sp)[3];
14016+//XXX return tsk->thread.eip;
14017 }
14018
14019 #ifndef CONFIG_SMP
14020@@ -126,15 +127,14 @@ void __show_regs(struct pt_regs *regs, i
14021 unsigned long sp;
14022 unsigned short ss, gs;
14023
14024- if (user_mode_vm(regs)) {
14025+ if (user_mode(regs)) {
14026 sp = regs->sp;
14027 ss = regs->ss & 0xffff;
14028- gs = get_user_gs(regs);
14029 } else {
14030 sp = kernel_stack_pointer(regs);
14031 savesegment(ss, ss);
14032- savesegment(gs, gs);
14033 }
14034+ gs = get_user_gs(regs);
14035
14036 show_regs_common();
14037
14038@@ -196,13 +196,14 @@ int copy_thread(unsigned long clone_flag
14039 struct task_struct *tsk;
14040 int err;
14041
14042- childregs = task_pt_regs(p);
14043+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
14044 *childregs = *regs;
14045 childregs->ax = 0;
14046 childregs->sp = sp;
14047
14048 p->thread.sp = (unsigned long) childregs;
14049 p->thread.sp0 = (unsigned long) (childregs+1);
14050+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14051
14052 p->thread.ip = (unsigned long) ret_from_fork;
14053
14054@@ -292,7 +293,7 @@ __switch_to(struct task_struct *prev_p,
14055 struct thread_struct *prev = &prev_p->thread,
14056 *next = &next_p->thread;
14057 int cpu = smp_processor_id();
14058- struct tss_struct *tss = &per_cpu(init_tss, cpu);
14059+ struct tss_struct *tss = init_tss + cpu;
14060 bool preload_fpu;
14061
14062 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
14063@@ -327,6 +328,10 @@ __switch_to(struct task_struct *prev_p,
14064 */
14065 lazy_save_gs(prev->gs);
14066
14067+#ifdef CONFIG_PAX_MEMORY_UDEREF
14068+ __set_fs(task_thread_info(next_p)->addr_limit);
14069+#endif
14070+
14071 /*
14072 * Load the per-thread Thread-Local Storage descriptor.
14073 */
14074@@ -362,6 +367,9 @@ __switch_to(struct task_struct *prev_p,
14075 */
14076 arch_end_context_switch(next_p);
14077
14078+ percpu_write(current_task, next_p);
14079+ percpu_write(current_tinfo, &next_p->tinfo);
14080+
14081 if (preload_fpu)
14082 __math_state_restore();
14083
14084@@ -371,8 +379,6 @@ __switch_to(struct task_struct *prev_p,
14085 if (prev->gs | next->gs)
14086 lazy_load_gs(next->gs);
14087
14088- percpu_write(current_task, next_p);
14089-
14090 return prev_p;
14091 }
14092
14093@@ -402,4 +408,3 @@ unsigned long get_wchan(struct task_stru
14094 } while (count++ < 16);
14095 return 0;
14096 }
14097-
14098diff -urNp linux-3.0.4/arch/x86/kernel/process_64.c linux-3.0.4/arch/x86/kernel/process_64.c
14099--- linux-3.0.4/arch/x86/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
14100+++ linux-3.0.4/arch/x86/kernel/process_64.c 2011-08-23 21:47:55.000000000 -0400
14101@@ -87,7 +87,7 @@ static void __exit_idle(void)
14102 void exit_idle(void)
14103 {
14104 /* idle loop has pid 0 */
14105- if (current->pid)
14106+ if (task_pid_nr(current))
14107 return;
14108 __exit_idle();
14109 }
14110@@ -260,8 +260,7 @@ int copy_thread(unsigned long clone_flag
14111 struct pt_regs *childregs;
14112 struct task_struct *me = current;
14113
14114- childregs = ((struct pt_regs *)
14115- (THREAD_SIZE + task_stack_page(p))) - 1;
14116+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
14117 *childregs = *regs;
14118
14119 childregs->ax = 0;
14120@@ -273,6 +272,7 @@ int copy_thread(unsigned long clone_flag
14121 p->thread.sp = (unsigned long) childregs;
14122 p->thread.sp0 = (unsigned long) (childregs+1);
14123 p->thread.usersp = me->thread.usersp;
14124+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14125
14126 set_tsk_thread_flag(p, TIF_FORK);
14127
14128@@ -375,7 +375,7 @@ __switch_to(struct task_struct *prev_p,
14129 struct thread_struct *prev = &prev_p->thread;
14130 struct thread_struct *next = &next_p->thread;
14131 int cpu = smp_processor_id();
14132- struct tss_struct *tss = &per_cpu(init_tss, cpu);
14133+ struct tss_struct *tss = init_tss + cpu;
14134 unsigned fsindex, gsindex;
14135 bool preload_fpu;
14136
14137@@ -471,10 +471,9 @@ __switch_to(struct task_struct *prev_p,
14138 prev->usersp = percpu_read(old_rsp);
14139 percpu_write(old_rsp, next->usersp);
14140 percpu_write(current_task, next_p);
14141+ percpu_write(current_tinfo, &next_p->tinfo);
14142
14143- percpu_write(kernel_stack,
14144- (unsigned long)task_stack_page(next_p) +
14145- THREAD_SIZE - KERNEL_STACK_OFFSET);
14146+ percpu_write(kernel_stack, next->sp0);
14147
14148 /*
14149 * Now maybe reload the debug registers and handle I/O bitmaps
14150@@ -536,12 +535,11 @@ unsigned long get_wchan(struct task_stru
14151 if (!p || p == current || p->state == TASK_RUNNING)
14152 return 0;
14153 stack = (unsigned long)task_stack_page(p);
14154- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
14155+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
14156 return 0;
14157 fp = *(u64 *)(p->thread.sp);
14158 do {
14159- if (fp < (unsigned long)stack ||
14160- fp >= (unsigned long)stack+THREAD_SIZE)
14161+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
14162 return 0;
14163 ip = *(u64 *)(fp+8);
14164 if (!in_sched_functions(ip))
14165diff -urNp linux-3.0.4/arch/x86/kernel/process.c linux-3.0.4/arch/x86/kernel/process.c
14166--- linux-3.0.4/arch/x86/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
14167+++ linux-3.0.4/arch/x86/kernel/process.c 2011-08-30 18:23:52.000000000 -0400
14168@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_stru
14169
14170 void free_thread_info(struct thread_info *ti)
14171 {
14172- free_thread_xstate(ti->task);
14173 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
14174 }
14175
14176+static struct kmem_cache *task_struct_cachep;
14177+
14178 void arch_task_cache_init(void)
14179 {
14180- task_xstate_cachep =
14181- kmem_cache_create("task_xstate", xstate_size,
14182+ /* create a slab on which task_structs can be allocated */
14183+ task_struct_cachep =
14184+ kmem_cache_create("task_struct", sizeof(struct task_struct),
14185+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
14186+
14187+ task_xstate_cachep =
14188+ kmem_cache_create("task_xstate", xstate_size,
14189 __alignof__(union thread_xstate),
14190- SLAB_PANIC | SLAB_NOTRACK, NULL);
14191+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
14192+}
14193+
14194+struct task_struct *alloc_task_struct_node(int node)
14195+{
14196+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
14197+}
14198+
14199+void free_task_struct(struct task_struct *task)
14200+{
14201+ free_thread_xstate(task);
14202+ kmem_cache_free(task_struct_cachep, task);
14203 }
14204
14205 /*
14206@@ -70,7 +87,7 @@ void exit_thread(void)
14207 unsigned long *bp = t->io_bitmap_ptr;
14208
14209 if (bp) {
14210- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
14211+ struct tss_struct *tss = init_tss + get_cpu();
14212
14213 t->io_bitmap_ptr = NULL;
14214 clear_thread_flag(TIF_IO_BITMAP);
14215@@ -106,7 +123,7 @@ void show_regs_common(void)
14216
14217 printk(KERN_CONT "\n");
14218 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
14219- current->pid, current->comm, print_tainted(),
14220+ task_pid_nr(current), current->comm, print_tainted(),
14221 init_utsname()->release,
14222 (int)strcspn(init_utsname()->version, " "),
14223 init_utsname()->version);
14224@@ -120,6 +137,9 @@ void flush_thread(void)
14225 {
14226 struct task_struct *tsk = current;
14227
14228+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
14229+ loadsegment(gs, 0);
14230+#endif
14231 flush_ptrace_hw_breakpoint(tsk);
14232 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
14233 /*
14234@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), voi
14235 regs.di = (unsigned long) arg;
14236
14237 #ifdef CONFIG_X86_32
14238- regs.ds = __USER_DS;
14239- regs.es = __USER_DS;
14240+ regs.ds = __KERNEL_DS;
14241+ regs.es = __KERNEL_DS;
14242 regs.fs = __KERNEL_PERCPU;
14243- regs.gs = __KERNEL_STACK_CANARY;
14244+ savesegment(gs, regs.gs);
14245 #else
14246 regs.ss = __KERNEL_DS;
14247 #endif
14248@@ -403,7 +423,7 @@ void default_idle(void)
14249 EXPORT_SYMBOL(default_idle);
14250 #endif
14251
14252-void stop_this_cpu(void *dummy)
14253+__noreturn void stop_this_cpu(void *dummy)
14254 {
14255 local_irq_disable();
14256 /*
14257@@ -668,16 +688,37 @@ static int __init idle_setup(char *str)
14258 }
14259 early_param("idle", idle_setup);
14260
14261-unsigned long arch_align_stack(unsigned long sp)
14262+#ifdef CONFIG_PAX_RANDKSTACK
14263+void pax_randomize_kstack(struct pt_regs *regs)
14264 {
14265- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
14266- sp -= get_random_int() % 8192;
14267- return sp & ~0xf;
14268-}
14269+ struct thread_struct *thread = &current->thread;
14270+ unsigned long time;
14271
14272-unsigned long arch_randomize_brk(struct mm_struct *mm)
14273-{
14274- unsigned long range_end = mm->brk + 0x02000000;
14275- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
14276-}
14277+ if (!randomize_va_space)
14278+ return;
14279+
14280+ if (v8086_mode(regs))
14281+ return;
14282
14283+ rdtscl(time);
14284+
14285+ /* P4 seems to return a 0 LSB, ignore it */
14286+#ifdef CONFIG_MPENTIUM4
14287+ time &= 0x3EUL;
14288+ time <<= 2;
14289+#elif defined(CONFIG_X86_64)
14290+ time &= 0xFUL;
14291+ time <<= 4;
14292+#else
14293+ time &= 0x1FUL;
14294+ time <<= 3;
14295+#endif
14296+
14297+ thread->sp0 ^= time;
14298+ load_sp0(init_tss + smp_processor_id(), thread);
14299+
14300+#ifdef CONFIG_X86_64
14301+ percpu_write(kernel_stack, thread->sp0);
14302+#endif
14303+}
14304+#endif
14305diff -urNp linux-3.0.4/arch/x86/kernel/ptrace.c linux-3.0.4/arch/x86/kernel/ptrace.c
14306--- linux-3.0.4/arch/x86/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
14307+++ linux-3.0.4/arch/x86/kernel/ptrace.c 2011-08-23 21:47:55.000000000 -0400
14308@@ -821,7 +821,7 @@ long arch_ptrace(struct task_struct *chi
14309 unsigned long addr, unsigned long data)
14310 {
14311 int ret;
14312- unsigned long __user *datap = (unsigned long __user *)data;
14313+ unsigned long __user *datap = (__force unsigned long __user *)data;
14314
14315 switch (request) {
14316 /* read the word at location addr in the USER area. */
14317@@ -906,14 +906,14 @@ long arch_ptrace(struct task_struct *chi
14318 if ((int) addr < 0)
14319 return -EIO;
14320 ret = do_get_thread_area(child, addr,
14321- (struct user_desc __user *)data);
14322+ (__force struct user_desc __user *) data);
14323 break;
14324
14325 case PTRACE_SET_THREAD_AREA:
14326 if ((int) addr < 0)
14327 return -EIO;
14328 ret = do_set_thread_area(child, addr,
14329- (struct user_desc __user *)data, 0);
14330+ (__force struct user_desc __user *) data, 0);
14331 break;
14332 #endif
14333
14334@@ -1330,7 +1330,7 @@ static void fill_sigtrap_info(struct tas
14335 memset(info, 0, sizeof(*info));
14336 info->si_signo = SIGTRAP;
14337 info->si_code = si_code;
14338- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
14339+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
14340 }
14341
14342 void user_single_step_siginfo(struct task_struct *tsk,
14343diff -urNp linux-3.0.4/arch/x86/kernel/pvclock.c linux-3.0.4/arch/x86/kernel/pvclock.c
14344--- linux-3.0.4/arch/x86/kernel/pvclock.c 2011-07-21 22:17:23.000000000 -0400
14345+++ linux-3.0.4/arch/x86/kernel/pvclock.c 2011-08-23 21:47:55.000000000 -0400
14346@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvc
14347 return pv_tsc_khz;
14348 }
14349
14350-static atomic64_t last_value = ATOMIC64_INIT(0);
14351+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
14352
14353 void pvclock_resume(void)
14354 {
14355- atomic64_set(&last_value, 0);
14356+ atomic64_set_unchecked(&last_value, 0);
14357 }
14358
14359 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
14360@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct
14361 * updating at the same time, and one of them could be slightly behind,
14362 * making the assumption that last_value always go forward fail to hold.
14363 */
14364- last = atomic64_read(&last_value);
14365+ last = atomic64_read_unchecked(&last_value);
14366 do {
14367 if (ret < last)
14368 return last;
14369- last = atomic64_cmpxchg(&last_value, last, ret);
14370+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
14371 } while (unlikely(last != ret));
14372
14373 return ret;
14374diff -urNp linux-3.0.4/arch/x86/kernel/reboot.c linux-3.0.4/arch/x86/kernel/reboot.c
14375--- linux-3.0.4/arch/x86/kernel/reboot.c 2011-07-21 22:17:23.000000000 -0400
14376+++ linux-3.0.4/arch/x86/kernel/reboot.c 2011-08-23 21:47:55.000000000 -0400
14377@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
14378 EXPORT_SYMBOL(pm_power_off);
14379
14380 static const struct desc_ptr no_idt = {};
14381-static int reboot_mode;
14382+static unsigned short reboot_mode;
14383 enum reboot_type reboot_type = BOOT_ACPI;
14384 int reboot_force;
14385
14386@@ -315,13 +315,17 @@ core_initcall(reboot_init);
14387 extern const unsigned char machine_real_restart_asm[];
14388 extern const u64 machine_real_restart_gdt[3];
14389
14390-void machine_real_restart(unsigned int type)
14391+__noreturn void machine_real_restart(unsigned int type)
14392 {
14393 void *restart_va;
14394 unsigned long restart_pa;
14395- void (*restart_lowmem)(unsigned int);
14396+ void (* __noreturn restart_lowmem)(unsigned int);
14397 u64 *lowmem_gdt;
14398
14399+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14400+ struct desc_struct *gdt;
14401+#endif
14402+
14403 local_irq_disable();
14404
14405 /* Write zero to CMOS register number 0x0f, which the BIOS POST
14406@@ -347,14 +351,14 @@ void machine_real_restart(unsigned int t
14407 boot)". This seems like a fairly standard thing that gets set by
14408 REBOOT.COM programs, and the previous reset routine did this
14409 too. */
14410- *((unsigned short *)0x472) = reboot_mode;
14411+ *(unsigned short *)(__va(0x472)) = reboot_mode;
14412
14413 /* Patch the GDT in the low memory trampoline */
14414 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
14415
14416 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
14417 restart_pa = virt_to_phys(restart_va);
14418- restart_lowmem = (void (*)(unsigned int))restart_pa;
14419+ restart_lowmem = (void *)restart_pa;
14420
14421 /* GDT[0]: GDT self-pointer */
14422 lowmem_gdt[0] =
14423@@ -365,7 +369,33 @@ void machine_real_restart(unsigned int t
14424 GDT_ENTRY(0x009b, restart_pa, 0xffff);
14425
14426 /* Jump to the identity-mapped low memory code */
14427+
14428+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14429+ gdt = get_cpu_gdt_table(smp_processor_id());
14430+ pax_open_kernel();
14431+#ifdef CONFIG_PAX_MEMORY_UDEREF
14432+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
14433+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
14434+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
14435+#endif
14436+#ifdef CONFIG_PAX_KERNEXEC
14437+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
14438+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
14439+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
14440+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
14441+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
14442+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
14443+#endif
14444+ pax_close_kernel();
14445+#endif
14446+
14447+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14448+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
14449+ unreachable();
14450+#else
14451 restart_lowmem(type);
14452+#endif
14453+
14454 }
14455 #ifdef CONFIG_APM_MODULE
14456 EXPORT_SYMBOL(machine_real_restart);
14457@@ -523,7 +553,7 @@ void __attribute__((weak)) mach_reboot_f
14458 * try to force a triple fault and then cycle between hitting the keyboard
14459 * controller and doing that
14460 */
14461-static void native_machine_emergency_restart(void)
14462+__noreturn static void native_machine_emergency_restart(void)
14463 {
14464 int i;
14465 int attempt = 0;
14466@@ -647,13 +677,13 @@ void native_machine_shutdown(void)
14467 #endif
14468 }
14469
14470-static void __machine_emergency_restart(int emergency)
14471+static __noreturn void __machine_emergency_restart(int emergency)
14472 {
14473 reboot_emergency = emergency;
14474 machine_ops.emergency_restart();
14475 }
14476
14477-static void native_machine_restart(char *__unused)
14478+static __noreturn void native_machine_restart(char *__unused)
14479 {
14480 printk("machine restart\n");
14481
14482@@ -662,7 +692,7 @@ static void native_machine_restart(char
14483 __machine_emergency_restart(0);
14484 }
14485
14486-static void native_machine_halt(void)
14487+static __noreturn void native_machine_halt(void)
14488 {
14489 /* stop other cpus and apics */
14490 machine_shutdown();
14491@@ -673,7 +703,7 @@ static void native_machine_halt(void)
14492 stop_this_cpu(NULL);
14493 }
14494
14495-static void native_machine_power_off(void)
14496+__noreturn static void native_machine_power_off(void)
14497 {
14498 if (pm_power_off) {
14499 if (!reboot_force)
14500@@ -682,6 +712,7 @@ static void native_machine_power_off(voi
14501 }
14502 /* a fallback in case there is no PM info available */
14503 tboot_shutdown(TB_SHUTDOWN_HALT);
14504+ unreachable();
14505 }
14506
14507 struct machine_ops machine_ops = {
14508diff -urNp linux-3.0.4/arch/x86/kernel/setup.c linux-3.0.4/arch/x86/kernel/setup.c
14509--- linux-3.0.4/arch/x86/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
14510+++ linux-3.0.4/arch/x86/kernel/setup.c 2011-08-23 21:47:55.000000000 -0400
14511@@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
14512 * area (640->1Mb) as ram even though it is not.
14513 * take them out.
14514 */
14515- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
14516+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
14517 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
14518 }
14519
14520@@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
14521
14522 if (!boot_params.hdr.root_flags)
14523 root_mountflags &= ~MS_RDONLY;
14524- init_mm.start_code = (unsigned long) _text;
14525- init_mm.end_code = (unsigned long) _etext;
14526+ init_mm.start_code = ktla_ktva((unsigned long) _text);
14527+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
14528 init_mm.end_data = (unsigned long) _edata;
14529 init_mm.brk = _brk_end;
14530
14531- code_resource.start = virt_to_phys(_text);
14532- code_resource.end = virt_to_phys(_etext)-1;
14533- data_resource.start = virt_to_phys(_etext);
14534+ code_resource.start = virt_to_phys(ktla_ktva(_text));
14535+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
14536+ data_resource.start = virt_to_phys(_sdata);
14537 data_resource.end = virt_to_phys(_edata)-1;
14538 bss_resource.start = virt_to_phys(&__bss_start);
14539 bss_resource.end = virt_to_phys(&__bss_stop)-1;
14540diff -urNp linux-3.0.4/arch/x86/kernel/setup_percpu.c linux-3.0.4/arch/x86/kernel/setup_percpu.c
14541--- linux-3.0.4/arch/x86/kernel/setup_percpu.c 2011-07-21 22:17:23.000000000 -0400
14542+++ linux-3.0.4/arch/x86/kernel/setup_percpu.c 2011-08-23 21:47:55.000000000 -0400
14543@@ -21,19 +21,17 @@
14544 #include <asm/cpu.h>
14545 #include <asm/stackprotector.h>
14546
14547-DEFINE_PER_CPU(int, cpu_number);
14548+#ifdef CONFIG_SMP
14549+DEFINE_PER_CPU(unsigned int, cpu_number);
14550 EXPORT_PER_CPU_SYMBOL(cpu_number);
14551+#endif
14552
14553-#ifdef CONFIG_X86_64
14554 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
14555-#else
14556-#define BOOT_PERCPU_OFFSET 0
14557-#endif
14558
14559 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
14560 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
14561
14562-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
14563+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
14564 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
14565 };
14566 EXPORT_SYMBOL(__per_cpu_offset);
14567@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
14568 {
14569 #ifdef CONFIG_X86_32
14570 struct desc_struct gdt;
14571+ unsigned long base = per_cpu_offset(cpu);
14572
14573- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
14574- 0x2 | DESCTYPE_S, 0x8);
14575- gdt.s = 1;
14576+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
14577+ 0x83 | DESCTYPE_S, 0xC);
14578 write_gdt_entry(get_cpu_gdt_table(cpu),
14579 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
14580 #endif
14581@@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
14582 /* alrighty, percpu areas up and running */
14583 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
14584 for_each_possible_cpu(cpu) {
14585+#ifdef CONFIG_CC_STACKPROTECTOR
14586+#ifdef CONFIG_X86_32
14587+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
14588+#endif
14589+#endif
14590 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
14591 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
14592 per_cpu(cpu_number, cpu) = cpu;
14593@@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
14594 */
14595 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
14596 #endif
14597+#ifdef CONFIG_CC_STACKPROTECTOR
14598+#ifdef CONFIG_X86_32
14599+ if (!cpu)
14600+ per_cpu(stack_canary.canary, cpu) = canary;
14601+#endif
14602+#endif
14603 /*
14604 * Up to this point, the boot CPU has been using .init.data
14605 * area. Reload any changed state for the boot CPU.
14606diff -urNp linux-3.0.4/arch/x86/kernel/signal.c linux-3.0.4/arch/x86/kernel/signal.c
14607--- linux-3.0.4/arch/x86/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
14608+++ linux-3.0.4/arch/x86/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
14609@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
14610 * Align the stack pointer according to the i386 ABI,
14611 * i.e. so that on function entry ((sp + 4) & 15) == 0.
14612 */
14613- sp = ((sp + 4) & -16ul) - 4;
14614+ sp = ((sp - 12) & -16ul) - 4;
14615 #else /* !CONFIG_X86_32 */
14616 sp = round_down(sp, 16) - 8;
14617 #endif
14618@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
14619 * Return an always-bogus address instead so we will die with SIGSEGV.
14620 */
14621 if (onsigstack && !likely(on_sig_stack(sp)))
14622- return (void __user *)-1L;
14623+ return (__force void __user *)-1L;
14624
14625 /* save i387 state */
14626 if (used_math() && save_i387_xstate(*fpstate) < 0)
14627- return (void __user *)-1L;
14628+ return (__force void __user *)-1L;
14629
14630 return (void __user *)sp;
14631 }
14632@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
14633 }
14634
14635 if (current->mm->context.vdso)
14636- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14637+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14638 else
14639- restorer = &frame->retcode;
14640+ restorer = (void __user *)&frame->retcode;
14641 if (ka->sa.sa_flags & SA_RESTORER)
14642 restorer = ka->sa.sa_restorer;
14643
14644@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
14645 * reasons and because gdb uses it as a signature to notice
14646 * signal handler stack frames.
14647 */
14648- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
14649+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
14650
14651 if (err)
14652 return -EFAULT;
14653@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, str
14654 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
14655
14656 /* Set up to return from userspace. */
14657- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14658+ if (current->mm->context.vdso)
14659+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14660+ else
14661+ restorer = (void __user *)&frame->retcode;
14662 if (ka->sa.sa_flags & SA_RESTORER)
14663 restorer = ka->sa.sa_restorer;
14664 put_user_ex(restorer, &frame->pretcode);
14665@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, str
14666 * reasons and because gdb uses it as a signature to notice
14667 * signal handler stack frames.
14668 */
14669- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
14670+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
14671 } put_user_catch(err);
14672
14673 if (err)
14674@@ -769,6 +772,8 @@ static void do_signal(struct pt_regs *re
14675 int signr;
14676 sigset_t *oldset;
14677
14678+ pax_track_stack();
14679+
14680 /*
14681 * We want the common case to go fast, which is why we may in certain
14682 * cases get here from kernel mode. Just return without doing anything
14683@@ -776,7 +781,7 @@ static void do_signal(struct pt_regs *re
14684 * X86_32: vm86 regs switched out by assembly code before reaching
14685 * here, so testing against kernel CS suffices.
14686 */
14687- if (!user_mode(regs))
14688+ if (!user_mode_novm(regs))
14689 return;
14690
14691 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
14692diff -urNp linux-3.0.4/arch/x86/kernel/smpboot.c linux-3.0.4/arch/x86/kernel/smpboot.c
14693--- linux-3.0.4/arch/x86/kernel/smpboot.c 2011-07-21 22:17:23.000000000 -0400
14694+++ linux-3.0.4/arch/x86/kernel/smpboot.c 2011-08-23 21:47:55.000000000 -0400
14695@@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int api
14696 set_idle_for_cpu(cpu, c_idle.idle);
14697 do_rest:
14698 per_cpu(current_task, cpu) = c_idle.idle;
14699+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
14700 #ifdef CONFIG_X86_32
14701 /* Stack for startup_32 can be just as for start_secondary onwards */
14702 irq_ctx_init(cpu);
14703 #else
14704 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
14705 initial_gs = per_cpu_offset(cpu);
14706- per_cpu(kernel_stack, cpu) =
14707- (unsigned long)task_stack_page(c_idle.idle) -
14708- KERNEL_STACK_OFFSET + THREAD_SIZE;
14709+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
14710 #endif
14711+
14712+ pax_open_kernel();
14713 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14714+ pax_close_kernel();
14715+
14716 initial_code = (unsigned long)start_secondary;
14717 stack_start = c_idle.idle->thread.sp;
14718
14719@@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int
14720
14721 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
14722
14723+#ifdef CONFIG_PAX_PER_CPU_PGD
14724+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
14725+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14726+ KERNEL_PGD_PTRS);
14727+#endif
14728+
14729 err = do_boot_cpu(apicid, cpu);
14730 if (err) {
14731 pr_debug("do_boot_cpu failed %d\n", err);
14732diff -urNp linux-3.0.4/arch/x86/kernel/step.c linux-3.0.4/arch/x86/kernel/step.c
14733--- linux-3.0.4/arch/x86/kernel/step.c 2011-07-21 22:17:23.000000000 -0400
14734+++ linux-3.0.4/arch/x86/kernel/step.c 2011-08-23 21:47:55.000000000 -0400
14735@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
14736 struct desc_struct *desc;
14737 unsigned long base;
14738
14739- seg &= ~7UL;
14740+ seg >>= 3;
14741
14742 mutex_lock(&child->mm->context.lock);
14743- if (unlikely((seg >> 3) >= child->mm->context.size))
14744+ if (unlikely(seg >= child->mm->context.size))
14745 addr = -1L; /* bogus selector, access would fault */
14746 else {
14747 desc = child->mm->context.ldt + seg;
14748@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
14749 addr += base;
14750 }
14751 mutex_unlock(&child->mm->context.lock);
14752- }
14753+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
14754+ addr = ktla_ktva(addr);
14755
14756 return addr;
14757 }
14758@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
14759 unsigned char opcode[15];
14760 unsigned long addr = convert_ip_to_linear(child, regs);
14761
14762+ if (addr == -EINVAL)
14763+ return 0;
14764+
14765 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
14766 for (i = 0; i < copied; i++) {
14767 switch (opcode[i]) {
14768@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
14769
14770 #ifdef CONFIG_X86_64
14771 case 0x40 ... 0x4f:
14772- if (regs->cs != __USER_CS)
14773+ if ((regs->cs & 0xffff) != __USER_CS)
14774 /* 32-bit mode: register increment */
14775 return 0;
14776 /* 64-bit mode: REX prefix */
14777diff -urNp linux-3.0.4/arch/x86/kernel/syscall_table_32.S linux-3.0.4/arch/x86/kernel/syscall_table_32.S
14778--- linux-3.0.4/arch/x86/kernel/syscall_table_32.S 2011-07-21 22:17:23.000000000 -0400
14779+++ linux-3.0.4/arch/x86/kernel/syscall_table_32.S 2011-08-23 21:47:55.000000000 -0400
14780@@ -1,3 +1,4 @@
14781+.section .rodata,"a",@progbits
14782 ENTRY(sys_call_table)
14783 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
14784 .long sys_exit
14785diff -urNp linux-3.0.4/arch/x86/kernel/sys_i386_32.c linux-3.0.4/arch/x86/kernel/sys_i386_32.c
14786--- linux-3.0.4/arch/x86/kernel/sys_i386_32.c 2011-07-21 22:17:23.000000000 -0400
14787+++ linux-3.0.4/arch/x86/kernel/sys_i386_32.c 2011-08-23 21:47:55.000000000 -0400
14788@@ -24,17 +24,224 @@
14789
14790 #include <asm/syscalls.h>
14791
14792-/*
14793- * Do a system call from kernel instead of calling sys_execve so we
14794- * end up with proper pt_regs.
14795- */
14796-int kernel_execve(const char *filename,
14797- const char *const argv[],
14798- const char *const envp[])
14799+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
14800 {
14801- long __res;
14802- asm volatile ("int $0x80"
14803- : "=a" (__res)
14804- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
14805- return __res;
14806+ unsigned long pax_task_size = TASK_SIZE;
14807+
14808+#ifdef CONFIG_PAX_SEGMEXEC
14809+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
14810+ pax_task_size = SEGMEXEC_TASK_SIZE;
14811+#endif
14812+
14813+ if (len > pax_task_size || addr > pax_task_size - len)
14814+ return -EINVAL;
14815+
14816+ return 0;
14817+}
14818+
14819+unsigned long
14820+arch_get_unmapped_area(struct file *filp, unsigned long addr,
14821+ unsigned long len, unsigned long pgoff, unsigned long flags)
14822+{
14823+ struct mm_struct *mm = current->mm;
14824+ struct vm_area_struct *vma;
14825+ unsigned long start_addr, pax_task_size = TASK_SIZE;
14826+
14827+#ifdef CONFIG_PAX_SEGMEXEC
14828+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
14829+ pax_task_size = SEGMEXEC_TASK_SIZE;
14830+#endif
14831+
14832+ pax_task_size -= PAGE_SIZE;
14833+
14834+ if (len > pax_task_size)
14835+ return -ENOMEM;
14836+
14837+ if (flags & MAP_FIXED)
14838+ return addr;
14839+
14840+#ifdef CONFIG_PAX_RANDMMAP
14841+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14842+#endif
14843+
14844+ if (addr) {
14845+ addr = PAGE_ALIGN(addr);
14846+ if (pax_task_size - len >= addr) {
14847+ vma = find_vma(mm, addr);
14848+ if (check_heap_stack_gap(vma, addr, len))
14849+ return addr;
14850+ }
14851+ }
14852+ if (len > mm->cached_hole_size) {
14853+ start_addr = addr = mm->free_area_cache;
14854+ } else {
14855+ start_addr = addr = mm->mmap_base;
14856+ mm->cached_hole_size = 0;
14857+ }
14858+
14859+#ifdef CONFIG_PAX_PAGEEXEC
14860+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
14861+ start_addr = 0x00110000UL;
14862+
14863+#ifdef CONFIG_PAX_RANDMMAP
14864+ if (mm->pax_flags & MF_PAX_RANDMMAP)
14865+ start_addr += mm->delta_mmap & 0x03FFF000UL;
14866+#endif
14867+
14868+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
14869+ start_addr = addr = mm->mmap_base;
14870+ else
14871+ addr = start_addr;
14872+ }
14873+#endif
14874+
14875+full_search:
14876+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
14877+ /* At this point: (!vma || addr < vma->vm_end). */
14878+ if (pax_task_size - len < addr) {
14879+ /*
14880+ * Start a new search - just in case we missed
14881+ * some holes.
14882+ */
14883+ if (start_addr != mm->mmap_base) {
14884+ start_addr = addr = mm->mmap_base;
14885+ mm->cached_hole_size = 0;
14886+ goto full_search;
14887+ }
14888+ return -ENOMEM;
14889+ }
14890+ if (check_heap_stack_gap(vma, addr, len))
14891+ break;
14892+ if (addr + mm->cached_hole_size < vma->vm_start)
14893+ mm->cached_hole_size = vma->vm_start - addr;
14894+ addr = vma->vm_end;
14895+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
14896+ start_addr = addr = mm->mmap_base;
14897+ mm->cached_hole_size = 0;
14898+ goto full_search;
14899+ }
14900+ }
14901+
14902+ /*
14903+ * Remember the place where we stopped the search:
14904+ */
14905+ mm->free_area_cache = addr + len;
14906+ return addr;
14907+}
14908+
14909+unsigned long
14910+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
14911+ const unsigned long len, const unsigned long pgoff,
14912+ const unsigned long flags)
14913+{
14914+ struct vm_area_struct *vma;
14915+ struct mm_struct *mm = current->mm;
14916+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
14917+
14918+#ifdef CONFIG_PAX_SEGMEXEC
14919+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
14920+ pax_task_size = SEGMEXEC_TASK_SIZE;
14921+#endif
14922+
14923+ pax_task_size -= PAGE_SIZE;
14924+
14925+ /* requested length too big for entire address space */
14926+ if (len > pax_task_size)
14927+ return -ENOMEM;
14928+
14929+ if (flags & MAP_FIXED)
14930+ return addr;
14931+
14932+#ifdef CONFIG_PAX_PAGEEXEC
14933+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
14934+ goto bottomup;
14935+#endif
14936+
14937+#ifdef CONFIG_PAX_RANDMMAP
14938+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14939+#endif
14940+
14941+ /* requesting a specific address */
14942+ if (addr) {
14943+ addr = PAGE_ALIGN(addr);
14944+ if (pax_task_size - len >= addr) {
14945+ vma = find_vma(mm, addr);
14946+ if (check_heap_stack_gap(vma, addr, len))
14947+ return addr;
14948+ }
14949+ }
14950+
14951+ /* check if free_area_cache is useful for us */
14952+ if (len <= mm->cached_hole_size) {
14953+ mm->cached_hole_size = 0;
14954+ mm->free_area_cache = mm->mmap_base;
14955+ }
14956+
14957+ /* either no address requested or can't fit in requested address hole */
14958+ addr = mm->free_area_cache;
14959+
14960+ /* make sure it can fit in the remaining address space */
14961+ if (addr > len) {
14962+ vma = find_vma(mm, addr-len);
14963+ if (check_heap_stack_gap(vma, addr - len, len))
14964+ /* remember the address as a hint for next time */
14965+ return (mm->free_area_cache = addr-len);
14966+ }
14967+
14968+ if (mm->mmap_base < len)
14969+ goto bottomup;
14970+
14971+ addr = mm->mmap_base-len;
14972+
14973+ do {
14974+ /*
14975+ * Lookup failure means no vma is above this address,
14976+ * else if new region fits below vma->vm_start,
14977+ * return with success:
14978+ */
14979+ vma = find_vma(mm, addr);
14980+ if (check_heap_stack_gap(vma, addr, len))
14981+ /* remember the address as a hint for next time */
14982+ return (mm->free_area_cache = addr);
14983+
14984+ /* remember the largest hole we saw so far */
14985+ if (addr + mm->cached_hole_size < vma->vm_start)
14986+ mm->cached_hole_size = vma->vm_start - addr;
14987+
14988+ /* try just below the current vma->vm_start */
14989+ addr = skip_heap_stack_gap(vma, len);
14990+ } while (!IS_ERR_VALUE(addr));
14991+
14992+bottomup:
14993+ /*
14994+ * A failed mmap() very likely causes application failure,
14995+ * so fall back to the bottom-up function here. This scenario
14996+ * can happen with large stack limits and large mmap()
14997+ * allocations.
14998+ */
14999+
15000+#ifdef CONFIG_PAX_SEGMEXEC
15001+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
15002+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
15003+ else
15004+#endif
15005+
15006+ mm->mmap_base = TASK_UNMAPPED_BASE;
15007+
15008+#ifdef CONFIG_PAX_RANDMMAP
15009+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15010+ mm->mmap_base += mm->delta_mmap;
15011+#endif
15012+
15013+ mm->free_area_cache = mm->mmap_base;
15014+ mm->cached_hole_size = ~0UL;
15015+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15016+ /*
15017+ * Restore the topdown base:
15018+ */
15019+ mm->mmap_base = base;
15020+ mm->free_area_cache = base;
15021+ mm->cached_hole_size = ~0UL;
15022+
15023+ return addr;
15024 }
15025diff -urNp linux-3.0.4/arch/x86/kernel/sys_x86_64.c linux-3.0.4/arch/x86/kernel/sys_x86_64.c
15026--- linux-3.0.4/arch/x86/kernel/sys_x86_64.c 2011-07-21 22:17:23.000000000 -0400
15027+++ linux-3.0.4/arch/x86/kernel/sys_x86_64.c 2011-08-23 21:47:55.000000000 -0400
15028@@ -32,8 +32,8 @@ out:
15029 return error;
15030 }
15031
15032-static void find_start_end(unsigned long flags, unsigned long *begin,
15033- unsigned long *end)
15034+static void find_start_end(struct mm_struct *mm, unsigned long flags,
15035+ unsigned long *begin, unsigned long *end)
15036 {
15037 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
15038 unsigned long new_begin;
15039@@ -52,7 +52,7 @@ static void find_start_end(unsigned long
15040 *begin = new_begin;
15041 }
15042 } else {
15043- *begin = TASK_UNMAPPED_BASE;
15044+ *begin = mm->mmap_base;
15045 *end = TASK_SIZE;
15046 }
15047 }
15048@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
15049 if (flags & MAP_FIXED)
15050 return addr;
15051
15052- find_start_end(flags, &begin, &end);
15053+ find_start_end(mm, flags, &begin, &end);
15054
15055 if (len > end)
15056 return -ENOMEM;
15057
15058+#ifdef CONFIG_PAX_RANDMMAP
15059+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15060+#endif
15061+
15062 if (addr) {
15063 addr = PAGE_ALIGN(addr);
15064 vma = find_vma(mm, addr);
15065- if (end - len >= addr &&
15066- (!vma || addr + len <= vma->vm_start))
15067+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
15068 return addr;
15069 }
15070 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
15071@@ -106,7 +109,7 @@ full_search:
15072 }
15073 return -ENOMEM;
15074 }
15075- if (!vma || addr + len <= vma->vm_start) {
15076+ if (check_heap_stack_gap(vma, addr, len)) {
15077 /*
15078 * Remember the place where we stopped the search:
15079 */
15080@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
15081 {
15082 struct vm_area_struct *vma;
15083 struct mm_struct *mm = current->mm;
15084- unsigned long addr = addr0;
15085+ unsigned long base = mm->mmap_base, addr = addr0;
15086
15087 /* requested length too big for entire address space */
15088 if (len > TASK_SIZE)
15089@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
15090 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
15091 goto bottomup;
15092
15093+#ifdef CONFIG_PAX_RANDMMAP
15094+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15095+#endif
15096+
15097 /* requesting a specific address */
15098 if (addr) {
15099 addr = PAGE_ALIGN(addr);
15100- vma = find_vma(mm, addr);
15101- if (TASK_SIZE - len >= addr &&
15102- (!vma || addr + len <= vma->vm_start))
15103- return addr;
15104+ if (TASK_SIZE - len >= addr) {
15105+ vma = find_vma(mm, addr);
15106+ if (check_heap_stack_gap(vma, addr, len))
15107+ return addr;
15108+ }
15109 }
15110
15111 /* check if free_area_cache is useful for us */
15112@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
15113 /* make sure it can fit in the remaining address space */
15114 if (addr > len) {
15115 vma = find_vma(mm, addr-len);
15116- if (!vma || addr <= vma->vm_start)
15117+ if (check_heap_stack_gap(vma, addr - len, len))
15118 /* remember the address as a hint for next time */
15119 return mm->free_area_cache = addr-len;
15120 }
15121@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
15122 * return with success:
15123 */
15124 vma = find_vma(mm, addr);
15125- if (!vma || addr+len <= vma->vm_start)
15126+ if (check_heap_stack_gap(vma, addr, len))
15127 /* remember the address as a hint for next time */
15128 return mm->free_area_cache = addr;
15129
15130@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
15131 mm->cached_hole_size = vma->vm_start - addr;
15132
15133 /* try just below the current vma->vm_start */
15134- addr = vma->vm_start-len;
15135- } while (len < vma->vm_start);
15136+ addr = skip_heap_stack_gap(vma, len);
15137+ } while (!IS_ERR_VALUE(addr));
15138
15139 bottomup:
15140 /*
15141@@ -198,13 +206,21 @@ bottomup:
15142 * can happen with large stack limits and large mmap()
15143 * allocations.
15144 */
15145+ mm->mmap_base = TASK_UNMAPPED_BASE;
15146+
15147+#ifdef CONFIG_PAX_RANDMMAP
15148+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15149+ mm->mmap_base += mm->delta_mmap;
15150+#endif
15151+
15152+ mm->free_area_cache = mm->mmap_base;
15153 mm->cached_hole_size = ~0UL;
15154- mm->free_area_cache = TASK_UNMAPPED_BASE;
15155 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15156 /*
15157 * Restore the topdown base:
15158 */
15159- mm->free_area_cache = mm->mmap_base;
15160+ mm->mmap_base = base;
15161+ mm->free_area_cache = base;
15162 mm->cached_hole_size = ~0UL;
15163
15164 return addr;
15165diff -urNp linux-3.0.4/arch/x86/kernel/tboot.c linux-3.0.4/arch/x86/kernel/tboot.c
15166--- linux-3.0.4/arch/x86/kernel/tboot.c 2011-07-21 22:17:23.000000000 -0400
15167+++ linux-3.0.4/arch/x86/kernel/tboot.c 2011-08-23 21:47:55.000000000 -0400
15168@@ -217,7 +217,7 @@ static int tboot_setup_sleep(void)
15169
15170 void tboot_shutdown(u32 shutdown_type)
15171 {
15172- void (*shutdown)(void);
15173+ void (* __noreturn shutdown)(void);
15174
15175 if (!tboot_enabled())
15176 return;
15177@@ -239,7 +239,7 @@ void tboot_shutdown(u32 shutdown_type)
15178
15179 switch_to_tboot_pt();
15180
15181- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
15182+ shutdown = (void *)tboot->shutdown_entry;
15183 shutdown();
15184
15185 /* should not reach here */
15186@@ -296,7 +296,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
15187 tboot_shutdown(acpi_shutdown_map[sleep_state]);
15188 }
15189
15190-static atomic_t ap_wfs_count;
15191+static atomic_unchecked_t ap_wfs_count;
15192
15193 static int tboot_wait_for_aps(int num_aps)
15194 {
15195@@ -320,9 +320,9 @@ static int __cpuinit tboot_cpu_callback(
15196 {
15197 switch (action) {
15198 case CPU_DYING:
15199- atomic_inc(&ap_wfs_count);
15200+ atomic_inc_unchecked(&ap_wfs_count);
15201 if (num_online_cpus() == 1)
15202- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
15203+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
15204 return NOTIFY_BAD;
15205 break;
15206 }
15207@@ -341,7 +341,7 @@ static __init int tboot_late_init(void)
15208
15209 tboot_create_trampoline();
15210
15211- atomic_set(&ap_wfs_count, 0);
15212+ atomic_set_unchecked(&ap_wfs_count, 0);
15213 register_hotcpu_notifier(&tboot_cpu_notifier);
15214 return 0;
15215 }
15216diff -urNp linux-3.0.4/arch/x86/kernel/time.c linux-3.0.4/arch/x86/kernel/time.c
15217--- linux-3.0.4/arch/x86/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
15218+++ linux-3.0.4/arch/x86/kernel/time.c 2011-08-23 21:47:55.000000000 -0400
15219@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs
15220 {
15221 unsigned long pc = instruction_pointer(regs);
15222
15223- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
15224+ if (!user_mode(regs) && in_lock_functions(pc)) {
15225 #ifdef CONFIG_FRAME_POINTER
15226- return *(unsigned long *)(regs->bp + sizeof(long));
15227+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
15228 #else
15229 unsigned long *sp =
15230 (unsigned long *)kernel_stack_pointer(regs);
15231@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
15232 * or above a saved flags. Eflags has bits 22-31 zero,
15233 * kernel addresses don't.
15234 */
15235+
15236+#ifdef CONFIG_PAX_KERNEXEC
15237+ return ktla_ktva(sp[0]);
15238+#else
15239 if (sp[0] >> 22)
15240 return sp[0];
15241 if (sp[1] >> 22)
15242 return sp[1];
15243 #endif
15244+
15245+#endif
15246 }
15247 return pc;
15248 }
15249diff -urNp linux-3.0.4/arch/x86/kernel/tls.c linux-3.0.4/arch/x86/kernel/tls.c
15250--- linux-3.0.4/arch/x86/kernel/tls.c 2011-07-21 22:17:23.000000000 -0400
15251+++ linux-3.0.4/arch/x86/kernel/tls.c 2011-08-23 21:47:55.000000000 -0400
15252@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
15253 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
15254 return -EINVAL;
15255
15256+#ifdef CONFIG_PAX_SEGMEXEC
15257+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
15258+ return -EINVAL;
15259+#endif
15260+
15261 set_tls_desc(p, idx, &info, 1);
15262
15263 return 0;
15264diff -urNp linux-3.0.4/arch/x86/kernel/trampoline_32.S linux-3.0.4/arch/x86/kernel/trampoline_32.S
15265--- linux-3.0.4/arch/x86/kernel/trampoline_32.S 2011-07-21 22:17:23.000000000 -0400
15266+++ linux-3.0.4/arch/x86/kernel/trampoline_32.S 2011-08-23 21:47:55.000000000 -0400
15267@@ -32,6 +32,12 @@
15268 #include <asm/segment.h>
15269 #include <asm/page_types.h>
15270
15271+#ifdef CONFIG_PAX_KERNEXEC
15272+#define ta(X) (X)
15273+#else
15274+#define ta(X) ((X) - __PAGE_OFFSET)
15275+#endif
15276+
15277 #ifdef CONFIG_SMP
15278
15279 .section ".x86_trampoline","a"
15280@@ -62,7 +68,7 @@ r_base = .
15281 inc %ax # protected mode (PE) bit
15282 lmsw %ax # into protected mode
15283 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
15284- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
15285+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
15286
15287 # These need to be in the same 64K segment as the above;
15288 # hence we don't use the boot_gdt_descr defined in head.S
15289diff -urNp linux-3.0.4/arch/x86/kernel/trampoline_64.S linux-3.0.4/arch/x86/kernel/trampoline_64.S
15290--- linux-3.0.4/arch/x86/kernel/trampoline_64.S 2011-07-21 22:17:23.000000000 -0400
15291+++ linux-3.0.4/arch/x86/kernel/trampoline_64.S 2011-08-23 21:47:55.000000000 -0400
15292@@ -90,7 +90,7 @@ startup_32:
15293 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
15294 movl %eax, %ds
15295
15296- movl $X86_CR4_PAE, %eax
15297+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15298 movl %eax, %cr4 # Enable PAE mode
15299
15300 # Setup trampoline 4 level pagetables
15301@@ -138,7 +138,7 @@ tidt:
15302 # so the kernel can live anywhere
15303 .balign 4
15304 tgdt:
15305- .short tgdt_end - tgdt # gdt limit
15306+ .short tgdt_end - tgdt - 1 # gdt limit
15307 .long tgdt - r_base
15308 .short 0
15309 .quad 0x00cf9b000000ffff # __KERNEL32_CS
15310diff -urNp linux-3.0.4/arch/x86/kernel/traps.c linux-3.0.4/arch/x86/kernel/traps.c
15311--- linux-3.0.4/arch/x86/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
15312+++ linux-3.0.4/arch/x86/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
15313@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
15314
15315 /* Do we ignore FPU interrupts ? */
15316 char ignore_fpu_irq;
15317-
15318-/*
15319- * The IDT has to be page-aligned to simplify the Pentium
15320- * F0 0F bug workaround.
15321- */
15322-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
15323 #endif
15324
15325 DECLARE_BITMAP(used_vectors, NR_VECTORS);
15326@@ -117,13 +111,13 @@ static inline void preempt_conditional_c
15327 }
15328
15329 static void __kprobes
15330-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
15331+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
15332 long error_code, siginfo_t *info)
15333 {
15334 struct task_struct *tsk = current;
15335
15336 #ifdef CONFIG_X86_32
15337- if (regs->flags & X86_VM_MASK) {
15338+ if (v8086_mode(regs)) {
15339 /*
15340 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
15341 * On nmi (interrupt 2), do_trap should not be called.
15342@@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str
15343 }
15344 #endif
15345
15346- if (!user_mode(regs))
15347+ if (!user_mode_novm(regs))
15348 goto kernel_trap;
15349
15350 #ifdef CONFIG_X86_32
15351@@ -157,7 +151,7 @@ trap_signal:
15352 printk_ratelimit()) {
15353 printk(KERN_INFO
15354 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
15355- tsk->comm, tsk->pid, str,
15356+ tsk->comm, task_pid_nr(tsk), str,
15357 regs->ip, regs->sp, error_code);
15358 print_vma_addr(" in ", regs->ip);
15359 printk("\n");
15360@@ -174,8 +168,20 @@ kernel_trap:
15361 if (!fixup_exception(regs)) {
15362 tsk->thread.error_code = error_code;
15363 tsk->thread.trap_no = trapnr;
15364+
15365+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15366+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
15367+ str = "PAX: suspicious stack segment fault";
15368+#endif
15369+
15370 die(str, regs, error_code);
15371 }
15372+
15373+#ifdef CONFIG_PAX_REFCOUNT
15374+ if (trapnr == 4)
15375+ pax_report_refcount_overflow(regs);
15376+#endif
15377+
15378 return;
15379
15380 #ifdef CONFIG_X86_32
15381@@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *re
15382 conditional_sti(regs);
15383
15384 #ifdef CONFIG_X86_32
15385- if (regs->flags & X86_VM_MASK)
15386+ if (v8086_mode(regs))
15387 goto gp_in_vm86;
15388 #endif
15389
15390 tsk = current;
15391- if (!user_mode(regs))
15392+ if (!user_mode_novm(regs))
15393 goto gp_in_kernel;
15394
15395+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
15396+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
15397+ struct mm_struct *mm = tsk->mm;
15398+ unsigned long limit;
15399+
15400+ down_write(&mm->mmap_sem);
15401+ limit = mm->context.user_cs_limit;
15402+ if (limit < TASK_SIZE) {
15403+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
15404+ up_write(&mm->mmap_sem);
15405+ return;
15406+ }
15407+ up_write(&mm->mmap_sem);
15408+ }
15409+#endif
15410+
15411 tsk->thread.error_code = error_code;
15412 tsk->thread.trap_no = 13;
15413
15414@@ -304,6 +326,13 @@ gp_in_kernel:
15415 if (notify_die(DIE_GPF, "general protection fault", regs,
15416 error_code, 13, SIGSEGV) == NOTIFY_STOP)
15417 return;
15418+
15419+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15420+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
15421+ die("PAX: suspicious general protection fault", regs, error_code);
15422+ else
15423+#endif
15424+
15425 die("general protection fault", regs, error_code);
15426 }
15427
15428@@ -433,6 +462,17 @@ static notrace __kprobes void default_do
15429 dotraplinkage notrace __kprobes void
15430 do_nmi(struct pt_regs *regs, long error_code)
15431 {
15432+
15433+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15434+ if (!user_mode(regs)) {
15435+ unsigned long cs = regs->cs & 0xFFFF;
15436+ unsigned long ip = ktva_ktla(regs->ip);
15437+
15438+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
15439+ regs->ip = ip;
15440+ }
15441+#endif
15442+
15443 nmi_enter();
15444
15445 inc_irq_stat(__nmi_count);
15446@@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(st
15447 /* It's safe to allow irq's after DR6 has been saved */
15448 preempt_conditional_sti(regs);
15449
15450- if (regs->flags & X86_VM_MASK) {
15451+ if (v8086_mode(regs)) {
15452 handle_vm86_trap((struct kernel_vm86_regs *) regs,
15453 error_code, 1);
15454 preempt_conditional_cli(regs);
15455@@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(st
15456 * We already checked v86 mode above, so we can check for kernel mode
15457 * by just checking the CPL of CS.
15458 */
15459- if ((dr6 & DR_STEP) && !user_mode(regs)) {
15460+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
15461 tsk->thread.debugreg6 &= ~DR_STEP;
15462 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
15463 regs->flags &= ~X86_EFLAGS_TF;
15464@@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, in
15465 return;
15466 conditional_sti(regs);
15467
15468- if (!user_mode_vm(regs))
15469+ if (!user_mode(regs))
15470 {
15471 if (!fixup_exception(regs)) {
15472 task->thread.error_code = error_code;
15473@@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) sm
15474 void __math_state_restore(void)
15475 {
15476 struct thread_info *thread = current_thread_info();
15477- struct task_struct *tsk = thread->task;
15478+ struct task_struct *tsk = current;
15479
15480 /*
15481 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
15482@@ -750,8 +790,7 @@ void __math_state_restore(void)
15483 */
15484 asmlinkage void math_state_restore(void)
15485 {
15486- struct thread_info *thread = current_thread_info();
15487- struct task_struct *tsk = thread->task;
15488+ struct task_struct *tsk = current;
15489
15490 if (!tsk_used_math(tsk)) {
15491 local_irq_enable();
15492diff -urNp linux-3.0.4/arch/x86/kernel/verify_cpu.S linux-3.0.4/arch/x86/kernel/verify_cpu.S
15493--- linux-3.0.4/arch/x86/kernel/verify_cpu.S 2011-07-21 22:17:23.000000000 -0400
15494+++ linux-3.0.4/arch/x86/kernel/verify_cpu.S 2011-08-23 21:48:14.000000000 -0400
15495@@ -20,6 +20,7 @@
15496 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
15497 * arch/x86/kernel/trampoline_64.S: secondary processor verification
15498 * arch/x86/kernel/head_32.S: processor startup
15499+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
15500 *
15501 * verify_cpu, returns the status of longmode and SSE in register %eax.
15502 * 0: Success 1: Failure
15503diff -urNp linux-3.0.4/arch/x86/kernel/vm86_32.c linux-3.0.4/arch/x86/kernel/vm86_32.c
15504--- linux-3.0.4/arch/x86/kernel/vm86_32.c 2011-07-21 22:17:23.000000000 -0400
15505+++ linux-3.0.4/arch/x86/kernel/vm86_32.c 2011-08-23 21:48:14.000000000 -0400
15506@@ -41,6 +41,7 @@
15507 #include <linux/ptrace.h>
15508 #include <linux/audit.h>
15509 #include <linux/stddef.h>
15510+#include <linux/grsecurity.h>
15511
15512 #include <asm/uaccess.h>
15513 #include <asm/io.h>
15514@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
15515 do_exit(SIGSEGV);
15516 }
15517
15518- tss = &per_cpu(init_tss, get_cpu());
15519+ tss = init_tss + get_cpu();
15520 current->thread.sp0 = current->thread.saved_sp0;
15521 current->thread.sysenter_cs = __KERNEL_CS;
15522 load_sp0(tss, &current->thread);
15523@@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __use
15524 struct task_struct *tsk;
15525 int tmp, ret = -EPERM;
15526
15527+#ifdef CONFIG_GRKERNSEC_VM86
15528+ if (!capable(CAP_SYS_RAWIO)) {
15529+ gr_handle_vm86();
15530+ goto out;
15531+ }
15532+#endif
15533+
15534 tsk = current;
15535 if (tsk->thread.saved_sp0)
15536 goto out;
15537@@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned
15538 int tmp, ret;
15539 struct vm86plus_struct __user *v86;
15540
15541+#ifdef CONFIG_GRKERNSEC_VM86
15542+ if (!capable(CAP_SYS_RAWIO)) {
15543+ gr_handle_vm86();
15544+ ret = -EPERM;
15545+ goto out;
15546+ }
15547+#endif
15548+
15549 tsk = current;
15550 switch (cmd) {
15551 case VM86_REQUEST_IRQ:
15552@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
15553 tsk->thread.saved_fs = info->regs32->fs;
15554 tsk->thread.saved_gs = get_user_gs(info->regs32);
15555
15556- tss = &per_cpu(init_tss, get_cpu());
15557+ tss = init_tss + get_cpu();
15558 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
15559 if (cpu_has_sep)
15560 tsk->thread.sysenter_cs = 0;
15561@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
15562 goto cannot_handle;
15563 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
15564 goto cannot_handle;
15565- intr_ptr = (unsigned long __user *) (i << 2);
15566+ intr_ptr = (__force unsigned long __user *) (i << 2);
15567 if (get_user(segoffs, intr_ptr))
15568 goto cannot_handle;
15569 if ((segoffs >> 16) == BIOSSEG)
15570diff -urNp linux-3.0.4/arch/x86/kernel/vmlinux.lds.S linux-3.0.4/arch/x86/kernel/vmlinux.lds.S
15571--- linux-3.0.4/arch/x86/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
15572+++ linux-3.0.4/arch/x86/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
15573@@ -26,6 +26,13 @@
15574 #include <asm/page_types.h>
15575 #include <asm/cache.h>
15576 #include <asm/boot.h>
15577+#include <asm/segment.h>
15578+
15579+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15580+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
15581+#else
15582+#define __KERNEL_TEXT_OFFSET 0
15583+#endif
15584
15585 #undef i386 /* in case the preprocessor is a 32bit one */
15586
15587@@ -69,31 +76,46 @@ jiffies_64 = jiffies;
15588
15589 PHDRS {
15590 text PT_LOAD FLAGS(5); /* R_E */
15591+#ifdef CONFIG_X86_32
15592+ module PT_LOAD FLAGS(5); /* R_E */
15593+#endif
15594+#ifdef CONFIG_XEN
15595+ rodata PT_LOAD FLAGS(5); /* R_E */
15596+#else
15597+ rodata PT_LOAD FLAGS(4); /* R__ */
15598+#endif
15599 data PT_LOAD FLAGS(6); /* RW_ */
15600 #ifdef CONFIG_X86_64
15601 user PT_LOAD FLAGS(5); /* R_E */
15602+#endif
15603+ init.begin PT_LOAD FLAGS(6); /* RW_ */
15604 #ifdef CONFIG_SMP
15605 percpu PT_LOAD FLAGS(6); /* RW_ */
15606 #endif
15607+ text.init PT_LOAD FLAGS(5); /* R_E */
15608+ text.exit PT_LOAD FLAGS(5); /* R_E */
15609 init PT_LOAD FLAGS(7); /* RWE */
15610-#endif
15611 note PT_NOTE FLAGS(0); /* ___ */
15612 }
15613
15614 SECTIONS
15615 {
15616 #ifdef CONFIG_X86_32
15617- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
15618- phys_startup_32 = startup_32 - LOAD_OFFSET;
15619+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
15620 #else
15621- . = __START_KERNEL;
15622- phys_startup_64 = startup_64 - LOAD_OFFSET;
15623+ . = __START_KERNEL;
15624 #endif
15625
15626 /* Text and read-only data */
15627- .text : AT(ADDR(.text) - LOAD_OFFSET) {
15628- _text = .;
15629+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15630 /* bootstrapping code */
15631+#ifdef CONFIG_X86_32
15632+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15633+#else
15634+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15635+#endif
15636+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15637+ _text = .;
15638 HEAD_TEXT
15639 #ifdef CONFIG_X86_32
15640 . = ALIGN(PAGE_SIZE);
15641@@ -109,13 +131,47 @@ SECTIONS
15642 IRQENTRY_TEXT
15643 *(.fixup)
15644 *(.gnu.warning)
15645- /* End of text section */
15646- _etext = .;
15647 } :text = 0x9090
15648
15649- NOTES :text :note
15650+ . += __KERNEL_TEXT_OFFSET;
15651+
15652+#ifdef CONFIG_X86_32
15653+ . = ALIGN(PAGE_SIZE);
15654+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
15655+
15656+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
15657+ MODULES_EXEC_VADDR = .;
15658+ BYTE(0)
15659+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
15660+ . = ALIGN(HPAGE_SIZE);
15661+ MODULES_EXEC_END = . - 1;
15662+#endif
15663+
15664+ } :module
15665+#endif
15666+
15667+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
15668+ /* End of text section */
15669+ _etext = . - __KERNEL_TEXT_OFFSET;
15670+ }
15671+
15672+#ifdef CONFIG_X86_32
15673+ . = ALIGN(PAGE_SIZE);
15674+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
15675+ *(.idt)
15676+ . = ALIGN(PAGE_SIZE);
15677+ *(.empty_zero_page)
15678+ *(.initial_pg_fixmap)
15679+ *(.initial_pg_pmd)
15680+ *(.initial_page_table)
15681+ *(.swapper_pg_dir)
15682+ } :rodata
15683+#endif
15684+
15685+ . = ALIGN(PAGE_SIZE);
15686+ NOTES :rodata :note
15687
15688- EXCEPTION_TABLE(16) :text = 0x9090
15689+ EXCEPTION_TABLE(16) :rodata
15690
15691 #if defined(CONFIG_DEBUG_RODATA)
15692 /* .text should occupy whole number of pages */
15693@@ -127,16 +183,20 @@ SECTIONS
15694
15695 /* Data */
15696 .data : AT(ADDR(.data) - LOAD_OFFSET) {
15697+
15698+#ifdef CONFIG_PAX_KERNEXEC
15699+ . = ALIGN(HPAGE_SIZE);
15700+#else
15701+ . = ALIGN(PAGE_SIZE);
15702+#endif
15703+
15704 /* Start of data section */
15705 _sdata = .;
15706
15707 /* init_task */
15708 INIT_TASK_DATA(THREAD_SIZE)
15709
15710-#ifdef CONFIG_X86_32
15711- /* 32 bit has nosave before _edata */
15712 NOSAVE_DATA
15713-#endif
15714
15715 PAGE_ALIGNED_DATA(PAGE_SIZE)
15716
15717@@ -208,12 +268,19 @@ SECTIONS
15718 #endif /* CONFIG_X86_64 */
15719
15720 /* Init code and data - will be freed after init */
15721- . = ALIGN(PAGE_SIZE);
15722 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
15723+ BYTE(0)
15724+
15725+#ifdef CONFIG_PAX_KERNEXEC
15726+ . = ALIGN(HPAGE_SIZE);
15727+#else
15728+ . = ALIGN(PAGE_SIZE);
15729+#endif
15730+
15731 __init_begin = .; /* paired with __init_end */
15732- }
15733+ } :init.begin
15734
15735-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
15736+#ifdef CONFIG_SMP
15737 /*
15738 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
15739 * output PHDR, so the next output section - .init.text - should
15740@@ -222,12 +289,27 @@ SECTIONS
15741 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
15742 #endif
15743
15744- INIT_TEXT_SECTION(PAGE_SIZE)
15745-#ifdef CONFIG_X86_64
15746- :init
15747-#endif
15748+ . = ALIGN(PAGE_SIZE);
15749+ init_begin = .;
15750+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
15751+ VMLINUX_SYMBOL(_sinittext) = .;
15752+ INIT_TEXT
15753+ VMLINUX_SYMBOL(_einittext) = .;
15754+ . = ALIGN(PAGE_SIZE);
15755+ } :text.init
15756
15757- INIT_DATA_SECTION(16)
15758+ /*
15759+ * .exit.text is discard at runtime, not link time, to deal with
15760+ * references from .altinstructions and .eh_frame
15761+ */
15762+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15763+ EXIT_TEXT
15764+ . = ALIGN(16);
15765+ } :text.exit
15766+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
15767+
15768+ . = ALIGN(PAGE_SIZE);
15769+ INIT_DATA_SECTION(16) :init
15770
15771 /*
15772 * Code and data for a variety of lowlevel trampolines, to be
15773@@ -301,19 +383,12 @@ SECTIONS
15774 }
15775
15776 . = ALIGN(8);
15777- /*
15778- * .exit.text is discard at runtime, not link time, to deal with
15779- * references from .altinstructions and .eh_frame
15780- */
15781- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
15782- EXIT_TEXT
15783- }
15784
15785 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
15786 EXIT_DATA
15787 }
15788
15789-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
15790+#ifndef CONFIG_SMP
15791 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
15792 #endif
15793
15794@@ -332,16 +407,10 @@ SECTIONS
15795 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
15796 __smp_locks = .;
15797 *(.smp_locks)
15798- . = ALIGN(PAGE_SIZE);
15799 __smp_locks_end = .;
15800+ . = ALIGN(PAGE_SIZE);
15801 }
15802
15803-#ifdef CONFIG_X86_64
15804- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
15805- NOSAVE_DATA
15806- }
15807-#endif
15808-
15809 /* BSS */
15810 . = ALIGN(PAGE_SIZE);
15811 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
15812@@ -357,6 +426,7 @@ SECTIONS
15813 __brk_base = .;
15814 . += 64 * 1024; /* 64k alignment slop space */
15815 *(.brk_reservation) /* areas brk users have reserved */
15816+ . = ALIGN(HPAGE_SIZE);
15817 __brk_limit = .;
15818 }
15819
15820@@ -383,13 +453,12 @@ SECTIONS
15821 * for the boot processor.
15822 */
15823 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
15824-INIT_PER_CPU(gdt_page);
15825 INIT_PER_CPU(irq_stack_union);
15826
15827 /*
15828 * Build-time check on the image size:
15829 */
15830-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
15831+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
15832 "kernel image bigger than KERNEL_IMAGE_SIZE");
15833
15834 #ifdef CONFIG_SMP
15835diff -urNp linux-3.0.4/arch/x86/kernel/vsyscall_64.c linux-3.0.4/arch/x86/kernel/vsyscall_64.c
15836--- linux-3.0.4/arch/x86/kernel/vsyscall_64.c 2011-07-21 22:17:23.000000000 -0400
15837+++ linux-3.0.4/arch/x86/kernel/vsyscall_64.c 2011-08-23 21:47:55.000000000 -0400
15838@@ -53,7 +53,7 @@ DEFINE_VVAR(int, vgetcpu_mode);
15839 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
15840 {
15841 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
15842- .sysctl_enabled = 1,
15843+ .sysctl_enabled = 0,
15844 };
15845
15846 void update_vsyscall_tz(void)
15847@@ -231,7 +231,7 @@ static long __vsyscall(3) venosys_1(void
15848 static ctl_table kernel_table2[] = {
15849 { .procname = "vsyscall64",
15850 .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
15851- .mode = 0644,
15852+ .mode = 0444,
15853 .proc_handler = proc_dointvec },
15854 {}
15855 };
15856diff -urNp linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c
15857--- linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c 2011-07-21 22:17:23.000000000 -0400
15858+++ linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c 2011-08-23 21:47:55.000000000 -0400
15859@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
15860 EXPORT_SYMBOL(copy_user_generic_string);
15861 EXPORT_SYMBOL(copy_user_generic_unrolled);
15862 EXPORT_SYMBOL(__copy_user_nocache);
15863-EXPORT_SYMBOL(_copy_from_user);
15864-EXPORT_SYMBOL(_copy_to_user);
15865
15866 EXPORT_SYMBOL(copy_page);
15867 EXPORT_SYMBOL(clear_page);
15868diff -urNp linux-3.0.4/arch/x86/kernel/xsave.c linux-3.0.4/arch/x86/kernel/xsave.c
15869--- linux-3.0.4/arch/x86/kernel/xsave.c 2011-07-21 22:17:23.000000000 -0400
15870+++ linux-3.0.4/arch/x86/kernel/xsave.c 2011-08-23 21:47:55.000000000 -0400
15871@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_
15872 fx_sw_user->xstate_size > fx_sw_user->extended_size)
15873 return -EINVAL;
15874
15875- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
15876+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
15877 fx_sw_user->extended_size -
15878 FP_XSTATE_MAGIC2_SIZE));
15879 if (err)
15880@@ -267,7 +267,7 @@ fx_only:
15881 * the other extended state.
15882 */
15883 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
15884- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
15885+ return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
15886 }
15887
15888 /*
15889@@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf
15890 if (use_xsave())
15891 err = restore_user_xstate(buf);
15892 else
15893- err = fxrstor_checking((__force struct i387_fxsave_struct *)
15894+ err = fxrstor_checking((struct i387_fxsave_struct __user *)
15895 buf);
15896 if (unlikely(err)) {
15897 /*
15898diff -urNp linux-3.0.4/arch/x86/kvm/emulate.c linux-3.0.4/arch/x86/kvm/emulate.c
15899--- linux-3.0.4/arch/x86/kvm/emulate.c 2011-07-21 22:17:23.000000000 -0400
15900+++ linux-3.0.4/arch/x86/kvm/emulate.c 2011-08-23 21:47:55.000000000 -0400
15901@@ -96,7 +96,7 @@
15902 #define Src2ImmByte (2<<29)
15903 #define Src2One (3<<29)
15904 #define Src2Imm (4<<29)
15905-#define Src2Mask (7<<29)
15906+#define Src2Mask (7U<<29)
15907
15908 #define X2(x...) x, x
15909 #define X3(x...) X2(x), x
15910@@ -207,6 +207,7 @@ struct gprefix {
15911
15912 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
15913 do { \
15914+ unsigned long _tmp; \
15915 __asm__ __volatile__ ( \
15916 _PRE_EFLAGS("0", "4", "2") \
15917 _op _suffix " %"_x"3,%1; " \
15918@@ -220,8 +221,6 @@ struct gprefix {
15919 /* Raw emulation: instruction has two explicit operands. */
15920 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
15921 do { \
15922- unsigned long _tmp; \
15923- \
15924 switch ((_dst).bytes) { \
15925 case 2: \
15926 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
15927@@ -237,7 +236,6 @@ struct gprefix {
15928
15929 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
15930 do { \
15931- unsigned long _tmp; \
15932 switch ((_dst).bytes) { \
15933 case 1: \
15934 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
15935diff -urNp linux-3.0.4/arch/x86/kvm/lapic.c linux-3.0.4/arch/x86/kvm/lapic.c
15936--- linux-3.0.4/arch/x86/kvm/lapic.c 2011-07-21 22:17:23.000000000 -0400
15937+++ linux-3.0.4/arch/x86/kvm/lapic.c 2011-08-23 21:47:55.000000000 -0400
15938@@ -53,7 +53,7 @@
15939 #define APIC_BUS_CYCLE_NS 1
15940
15941 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
15942-#define apic_debug(fmt, arg...)
15943+#define apic_debug(fmt, arg...) do {} while (0)
15944
15945 #define APIC_LVT_NUM 6
15946 /* 14 is the version for Xeon and Pentium 8.4.8*/
15947diff -urNp linux-3.0.4/arch/x86/kvm/mmu.c linux-3.0.4/arch/x86/kvm/mmu.c
15948--- linux-3.0.4/arch/x86/kvm/mmu.c 2011-07-21 22:17:23.000000000 -0400
15949+++ linux-3.0.4/arch/x86/kvm/mmu.c 2011-08-23 21:47:55.000000000 -0400
15950@@ -3238,7 +3238,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
15951
15952 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
15953
15954- invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
15955+ invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
15956
15957 /*
15958 * Assume that the pte write on a page table of the same type
15959@@ -3270,7 +3270,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
15960 }
15961
15962 spin_lock(&vcpu->kvm->mmu_lock);
15963- if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
15964+ if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
15965 gentry = 0;
15966 kvm_mmu_free_some_pages(vcpu);
15967 ++vcpu->kvm->stat.mmu_pte_write;
15968diff -urNp linux-3.0.4/arch/x86/kvm/paging_tmpl.h linux-3.0.4/arch/x86/kvm/paging_tmpl.h
15969--- linux-3.0.4/arch/x86/kvm/paging_tmpl.h 2011-07-21 22:17:23.000000000 -0400
15970+++ linux-3.0.4/arch/x86/kvm/paging_tmpl.h 2011-08-23 21:48:14.000000000 -0400
15971@@ -583,6 +583,8 @@ static int FNAME(page_fault)(struct kvm_
15972 unsigned long mmu_seq;
15973 bool map_writable;
15974
15975+ pax_track_stack();
15976+
15977 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
15978
15979 r = mmu_topup_memory_caches(vcpu);
15980@@ -703,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcp
15981 if (need_flush)
15982 kvm_flush_remote_tlbs(vcpu->kvm);
15983
15984- atomic_inc(&vcpu->kvm->arch.invlpg_counter);
15985+ atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
15986
15987 spin_unlock(&vcpu->kvm->mmu_lock);
15988
15989diff -urNp linux-3.0.4/arch/x86/kvm/svm.c linux-3.0.4/arch/x86/kvm/svm.c
15990--- linux-3.0.4/arch/x86/kvm/svm.c 2011-07-21 22:17:23.000000000 -0400
15991+++ linux-3.0.4/arch/x86/kvm/svm.c 2011-08-23 21:47:55.000000000 -0400
15992@@ -3377,7 +3377,11 @@ static void reload_tss(struct kvm_vcpu *
15993 int cpu = raw_smp_processor_id();
15994
15995 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
15996+
15997+ pax_open_kernel();
15998 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
15999+ pax_close_kernel();
16000+
16001 load_TR_desc();
16002 }
16003
16004@@ -3755,6 +3759,10 @@ static void svm_vcpu_run(struct kvm_vcpu
16005 #endif
16006 #endif
16007
16008+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16009+ __set_fs(current_thread_info()->addr_limit);
16010+#endif
16011+
16012 reload_tss(vcpu);
16013
16014 local_irq_disable();
16015diff -urNp linux-3.0.4/arch/x86/kvm/vmx.c linux-3.0.4/arch/x86/kvm/vmx.c
16016--- linux-3.0.4/arch/x86/kvm/vmx.c 2011-07-21 22:17:23.000000000 -0400
16017+++ linux-3.0.4/arch/x86/kvm/vmx.c 2011-08-23 21:47:55.000000000 -0400
16018@@ -797,7 +797,11 @@ static void reload_tss(void)
16019 struct desc_struct *descs;
16020
16021 descs = (void *)gdt->address;
16022+
16023+ pax_open_kernel();
16024 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
16025+ pax_close_kernel();
16026+
16027 load_TR_desc();
16028 }
16029
16030@@ -1747,8 +1751,11 @@ static __init int hardware_setup(void)
16031 if (!cpu_has_vmx_flexpriority())
16032 flexpriority_enabled = 0;
16033
16034- if (!cpu_has_vmx_tpr_shadow())
16035- kvm_x86_ops->update_cr8_intercept = NULL;
16036+ if (!cpu_has_vmx_tpr_shadow()) {
16037+ pax_open_kernel();
16038+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
16039+ pax_close_kernel();
16040+ }
16041
16042 if (enable_ept && !cpu_has_vmx_ept_2m_page())
16043 kvm_disable_largepages();
16044@@ -2814,7 +2821,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
16045 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
16046
16047 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
16048- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
16049+ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
16050 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
16051 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
16052 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
16053@@ -4211,6 +4218,12 @@ static void __noclone vmx_vcpu_run(struc
16054 "jmp .Lkvm_vmx_return \n\t"
16055 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
16056 ".Lkvm_vmx_return: "
16057+
16058+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16059+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
16060+ ".Lkvm_vmx_return2: "
16061+#endif
16062+
16063 /* Save guest registers, load host registers, keep flags */
16064 "mov %0, %c[wordsize](%%"R"sp) \n\t"
16065 "pop %0 \n\t"
16066@@ -4259,6 +4272,11 @@ static void __noclone vmx_vcpu_run(struc
16067 #endif
16068 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
16069 [wordsize]"i"(sizeof(ulong))
16070+
16071+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16072+ ,[cs]"i"(__KERNEL_CS)
16073+#endif
16074+
16075 : "cc", "memory"
16076 , R"ax", R"bx", R"di", R"si"
16077 #ifdef CONFIG_X86_64
16078@@ -4276,7 +4294,16 @@ static void __noclone vmx_vcpu_run(struc
16079
16080 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
16081
16082- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
16083+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
16084+
16085+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16086+ loadsegment(fs, __KERNEL_PERCPU);
16087+#endif
16088+
16089+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16090+ __set_fs(current_thread_info()->addr_limit);
16091+#endif
16092+
16093 vmx->launched = 1;
16094
16095 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
16096diff -urNp linux-3.0.4/arch/x86/kvm/x86.c linux-3.0.4/arch/x86/kvm/x86.c
16097--- linux-3.0.4/arch/x86/kvm/x86.c 2011-07-21 22:17:23.000000000 -0400
16098+++ linux-3.0.4/arch/x86/kvm/x86.c 2011-08-23 21:47:55.000000000 -0400
16099@@ -2057,6 +2057,8 @@ long kvm_arch_dev_ioctl(struct file *fil
16100 if (n < msr_list.nmsrs)
16101 goto out;
16102 r = -EFAULT;
16103+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
16104+ goto out;
16105 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
16106 num_msrs_to_save * sizeof(u32)))
16107 goto out;
16108@@ -2229,15 +2231,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
16109 struct kvm_cpuid2 *cpuid,
16110 struct kvm_cpuid_entry2 __user *entries)
16111 {
16112- int r;
16113+ int r, i;
16114
16115 r = -E2BIG;
16116 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
16117 goto out;
16118 r = -EFAULT;
16119- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
16120- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16121+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16122 goto out;
16123+ for (i = 0; i < cpuid->nent; ++i) {
16124+ struct kvm_cpuid_entry2 cpuid_entry;
16125+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
16126+ goto out;
16127+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
16128+ }
16129 vcpu->arch.cpuid_nent = cpuid->nent;
16130 kvm_apic_set_version(vcpu);
16131 kvm_x86_ops->cpuid_update(vcpu);
16132@@ -2252,15 +2259,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
16133 struct kvm_cpuid2 *cpuid,
16134 struct kvm_cpuid_entry2 __user *entries)
16135 {
16136- int r;
16137+ int r, i;
16138
16139 r = -E2BIG;
16140 if (cpuid->nent < vcpu->arch.cpuid_nent)
16141 goto out;
16142 r = -EFAULT;
16143- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
16144- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16145+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16146 goto out;
16147+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
16148+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
16149+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
16150+ goto out;
16151+ }
16152 return 0;
16153
16154 out:
16155@@ -2579,7 +2590,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
16156 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
16157 struct kvm_interrupt *irq)
16158 {
16159- if (irq->irq < 0 || irq->irq >= 256)
16160+ if (irq->irq >= 256)
16161 return -EINVAL;
16162 if (irqchip_in_kernel(vcpu->kvm))
16163 return -ENXIO;
16164@@ -4878,7 +4889,7 @@ void kvm_after_handle_nmi(struct kvm_vcp
16165 }
16166 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
16167
16168-int kvm_arch_init(void *opaque)
16169+int kvm_arch_init(const void *opaque)
16170 {
16171 int r;
16172 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
16173diff -urNp linux-3.0.4/arch/x86/lguest/boot.c linux-3.0.4/arch/x86/lguest/boot.c
16174--- linux-3.0.4/arch/x86/lguest/boot.c 2011-07-21 22:17:23.000000000 -0400
16175+++ linux-3.0.4/arch/x86/lguest/boot.c 2011-08-23 21:47:55.000000000 -0400
16176@@ -1176,9 +1176,10 @@ static __init int early_put_chars(u32 vt
16177 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
16178 * Launcher to reboot us.
16179 */
16180-static void lguest_restart(char *reason)
16181+static __noreturn void lguest_restart(char *reason)
16182 {
16183 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
16184+ BUG();
16185 }
16186
16187 /*G:050
16188diff -urNp linux-3.0.4/arch/x86/lib/atomic64_32.c linux-3.0.4/arch/x86/lib/atomic64_32.c
16189--- linux-3.0.4/arch/x86/lib/atomic64_32.c 2011-07-21 22:17:23.000000000 -0400
16190+++ linux-3.0.4/arch/x86/lib/atomic64_32.c 2011-08-23 21:47:55.000000000 -0400
16191@@ -8,18 +8,30 @@
16192
16193 long long atomic64_read_cx8(long long, const atomic64_t *v);
16194 EXPORT_SYMBOL(atomic64_read_cx8);
16195+long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16196+EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
16197 long long atomic64_set_cx8(long long, const atomic64_t *v);
16198 EXPORT_SYMBOL(atomic64_set_cx8);
16199+long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16200+EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
16201 long long atomic64_xchg_cx8(long long, unsigned high);
16202 EXPORT_SYMBOL(atomic64_xchg_cx8);
16203 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
16204 EXPORT_SYMBOL(atomic64_add_return_cx8);
16205+long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16206+EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
16207 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
16208 EXPORT_SYMBOL(atomic64_sub_return_cx8);
16209+long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16210+EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
16211 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
16212 EXPORT_SYMBOL(atomic64_inc_return_cx8);
16213+long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16214+EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
16215 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
16216 EXPORT_SYMBOL(atomic64_dec_return_cx8);
16217+long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16218+EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
16219 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
16220 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
16221 int atomic64_inc_not_zero_cx8(atomic64_t *v);
16222@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
16223 #ifndef CONFIG_X86_CMPXCHG64
16224 long long atomic64_read_386(long long, const atomic64_t *v);
16225 EXPORT_SYMBOL(atomic64_read_386);
16226+long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
16227+EXPORT_SYMBOL(atomic64_read_unchecked_386);
16228 long long atomic64_set_386(long long, const atomic64_t *v);
16229 EXPORT_SYMBOL(atomic64_set_386);
16230+long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
16231+EXPORT_SYMBOL(atomic64_set_unchecked_386);
16232 long long atomic64_xchg_386(long long, unsigned high);
16233 EXPORT_SYMBOL(atomic64_xchg_386);
16234 long long atomic64_add_return_386(long long a, atomic64_t *v);
16235 EXPORT_SYMBOL(atomic64_add_return_386);
16236+long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16237+EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
16238 long long atomic64_sub_return_386(long long a, atomic64_t *v);
16239 EXPORT_SYMBOL(atomic64_sub_return_386);
16240+long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16241+EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
16242 long long atomic64_inc_return_386(long long a, atomic64_t *v);
16243 EXPORT_SYMBOL(atomic64_inc_return_386);
16244+long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16245+EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
16246 long long atomic64_dec_return_386(long long a, atomic64_t *v);
16247 EXPORT_SYMBOL(atomic64_dec_return_386);
16248+long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16249+EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
16250 long long atomic64_add_386(long long a, atomic64_t *v);
16251 EXPORT_SYMBOL(atomic64_add_386);
16252+long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
16253+EXPORT_SYMBOL(atomic64_add_unchecked_386);
16254 long long atomic64_sub_386(long long a, atomic64_t *v);
16255 EXPORT_SYMBOL(atomic64_sub_386);
16256+long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
16257+EXPORT_SYMBOL(atomic64_sub_unchecked_386);
16258 long long atomic64_inc_386(long long a, atomic64_t *v);
16259 EXPORT_SYMBOL(atomic64_inc_386);
16260+long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
16261+EXPORT_SYMBOL(atomic64_inc_unchecked_386);
16262 long long atomic64_dec_386(long long a, atomic64_t *v);
16263 EXPORT_SYMBOL(atomic64_dec_386);
16264+long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
16265+EXPORT_SYMBOL(atomic64_dec_unchecked_386);
16266 long long atomic64_dec_if_positive_386(atomic64_t *v);
16267 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
16268 int atomic64_inc_not_zero_386(atomic64_t *v);
16269diff -urNp linux-3.0.4/arch/x86/lib/atomic64_386_32.S linux-3.0.4/arch/x86/lib/atomic64_386_32.S
16270--- linux-3.0.4/arch/x86/lib/atomic64_386_32.S 2011-07-21 22:17:23.000000000 -0400
16271+++ linux-3.0.4/arch/x86/lib/atomic64_386_32.S 2011-08-23 21:47:55.000000000 -0400
16272@@ -48,6 +48,10 @@ BEGIN(read)
16273 movl (v), %eax
16274 movl 4(v), %edx
16275 RET_ENDP
16276+BEGIN(read_unchecked)
16277+ movl (v), %eax
16278+ movl 4(v), %edx
16279+RET_ENDP
16280 #undef v
16281
16282 #define v %esi
16283@@ -55,6 +59,10 @@ BEGIN(set)
16284 movl %ebx, (v)
16285 movl %ecx, 4(v)
16286 RET_ENDP
16287+BEGIN(set_unchecked)
16288+ movl %ebx, (v)
16289+ movl %ecx, 4(v)
16290+RET_ENDP
16291 #undef v
16292
16293 #define v %esi
16294@@ -70,6 +78,20 @@ RET_ENDP
16295 BEGIN(add)
16296 addl %eax, (v)
16297 adcl %edx, 4(v)
16298+
16299+#ifdef CONFIG_PAX_REFCOUNT
16300+ jno 0f
16301+ subl %eax, (v)
16302+ sbbl %edx, 4(v)
16303+ int $4
16304+0:
16305+ _ASM_EXTABLE(0b, 0b)
16306+#endif
16307+
16308+RET_ENDP
16309+BEGIN(add_unchecked)
16310+ addl %eax, (v)
16311+ adcl %edx, 4(v)
16312 RET_ENDP
16313 #undef v
16314
16315@@ -77,6 +99,24 @@ RET_ENDP
16316 BEGIN(add_return)
16317 addl (v), %eax
16318 adcl 4(v), %edx
16319+
16320+#ifdef CONFIG_PAX_REFCOUNT
16321+ into
16322+1234:
16323+ _ASM_EXTABLE(1234b, 2f)
16324+#endif
16325+
16326+ movl %eax, (v)
16327+ movl %edx, 4(v)
16328+
16329+#ifdef CONFIG_PAX_REFCOUNT
16330+2:
16331+#endif
16332+
16333+RET_ENDP
16334+BEGIN(add_return_unchecked)
16335+ addl (v), %eax
16336+ adcl 4(v), %edx
16337 movl %eax, (v)
16338 movl %edx, 4(v)
16339 RET_ENDP
16340@@ -86,6 +126,20 @@ RET_ENDP
16341 BEGIN(sub)
16342 subl %eax, (v)
16343 sbbl %edx, 4(v)
16344+
16345+#ifdef CONFIG_PAX_REFCOUNT
16346+ jno 0f
16347+ addl %eax, (v)
16348+ adcl %edx, 4(v)
16349+ int $4
16350+0:
16351+ _ASM_EXTABLE(0b, 0b)
16352+#endif
16353+
16354+RET_ENDP
16355+BEGIN(sub_unchecked)
16356+ subl %eax, (v)
16357+ sbbl %edx, 4(v)
16358 RET_ENDP
16359 #undef v
16360
16361@@ -96,6 +150,27 @@ BEGIN(sub_return)
16362 sbbl $0, %edx
16363 addl (v), %eax
16364 adcl 4(v), %edx
16365+
16366+#ifdef CONFIG_PAX_REFCOUNT
16367+ into
16368+1234:
16369+ _ASM_EXTABLE(1234b, 2f)
16370+#endif
16371+
16372+ movl %eax, (v)
16373+ movl %edx, 4(v)
16374+
16375+#ifdef CONFIG_PAX_REFCOUNT
16376+2:
16377+#endif
16378+
16379+RET_ENDP
16380+BEGIN(sub_return_unchecked)
16381+ negl %edx
16382+ negl %eax
16383+ sbbl $0, %edx
16384+ addl (v), %eax
16385+ adcl 4(v), %edx
16386 movl %eax, (v)
16387 movl %edx, 4(v)
16388 RET_ENDP
16389@@ -105,6 +180,20 @@ RET_ENDP
16390 BEGIN(inc)
16391 addl $1, (v)
16392 adcl $0, 4(v)
16393+
16394+#ifdef CONFIG_PAX_REFCOUNT
16395+ jno 0f
16396+ subl $1, (v)
16397+ sbbl $0, 4(v)
16398+ int $4
16399+0:
16400+ _ASM_EXTABLE(0b, 0b)
16401+#endif
16402+
16403+RET_ENDP
16404+BEGIN(inc_unchecked)
16405+ addl $1, (v)
16406+ adcl $0, 4(v)
16407 RET_ENDP
16408 #undef v
16409
16410@@ -114,6 +203,26 @@ BEGIN(inc_return)
16411 movl 4(v), %edx
16412 addl $1, %eax
16413 adcl $0, %edx
16414+
16415+#ifdef CONFIG_PAX_REFCOUNT
16416+ into
16417+1234:
16418+ _ASM_EXTABLE(1234b, 2f)
16419+#endif
16420+
16421+ movl %eax, (v)
16422+ movl %edx, 4(v)
16423+
16424+#ifdef CONFIG_PAX_REFCOUNT
16425+2:
16426+#endif
16427+
16428+RET_ENDP
16429+BEGIN(inc_return_unchecked)
16430+ movl (v), %eax
16431+ movl 4(v), %edx
16432+ addl $1, %eax
16433+ adcl $0, %edx
16434 movl %eax, (v)
16435 movl %edx, 4(v)
16436 RET_ENDP
16437@@ -123,6 +232,20 @@ RET_ENDP
16438 BEGIN(dec)
16439 subl $1, (v)
16440 sbbl $0, 4(v)
16441+
16442+#ifdef CONFIG_PAX_REFCOUNT
16443+ jno 0f
16444+ addl $1, (v)
16445+ adcl $0, 4(v)
16446+ int $4
16447+0:
16448+ _ASM_EXTABLE(0b, 0b)
16449+#endif
16450+
16451+RET_ENDP
16452+BEGIN(dec_unchecked)
16453+ subl $1, (v)
16454+ sbbl $0, 4(v)
16455 RET_ENDP
16456 #undef v
16457
16458@@ -132,6 +255,26 @@ BEGIN(dec_return)
16459 movl 4(v), %edx
16460 subl $1, %eax
16461 sbbl $0, %edx
16462+
16463+#ifdef CONFIG_PAX_REFCOUNT
16464+ into
16465+1234:
16466+ _ASM_EXTABLE(1234b, 2f)
16467+#endif
16468+
16469+ movl %eax, (v)
16470+ movl %edx, 4(v)
16471+
16472+#ifdef CONFIG_PAX_REFCOUNT
16473+2:
16474+#endif
16475+
16476+RET_ENDP
16477+BEGIN(dec_return_unchecked)
16478+ movl (v), %eax
16479+ movl 4(v), %edx
16480+ subl $1, %eax
16481+ sbbl $0, %edx
16482 movl %eax, (v)
16483 movl %edx, 4(v)
16484 RET_ENDP
16485@@ -143,6 +286,13 @@ BEGIN(add_unless)
16486 adcl %edx, %edi
16487 addl (v), %eax
16488 adcl 4(v), %edx
16489+
16490+#ifdef CONFIG_PAX_REFCOUNT
16491+ into
16492+1234:
16493+ _ASM_EXTABLE(1234b, 2f)
16494+#endif
16495+
16496 cmpl %eax, %esi
16497 je 3f
16498 1:
16499@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
16500 1:
16501 addl $1, %eax
16502 adcl $0, %edx
16503+
16504+#ifdef CONFIG_PAX_REFCOUNT
16505+ into
16506+1234:
16507+ _ASM_EXTABLE(1234b, 2f)
16508+#endif
16509+
16510 movl %eax, (v)
16511 movl %edx, 4(v)
16512 movl $1, %eax
16513@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
16514 movl 4(v), %edx
16515 subl $1, %eax
16516 sbbl $0, %edx
16517+
16518+#ifdef CONFIG_PAX_REFCOUNT
16519+ into
16520+1234:
16521+ _ASM_EXTABLE(1234b, 1f)
16522+#endif
16523+
16524 js 1f
16525 movl %eax, (v)
16526 movl %edx, 4(v)
16527diff -urNp linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S
16528--- linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S 2011-07-21 22:17:23.000000000 -0400
16529+++ linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S 2011-08-23 21:47:55.000000000 -0400
16530@@ -39,6 +39,14 @@ ENTRY(atomic64_read_cx8)
16531 CFI_ENDPROC
16532 ENDPROC(atomic64_read_cx8)
16533
16534+ENTRY(atomic64_read_unchecked_cx8)
16535+ CFI_STARTPROC
16536+
16537+ read64 %ecx
16538+ ret
16539+ CFI_ENDPROC
16540+ENDPROC(atomic64_read_unchecked_cx8)
16541+
16542 ENTRY(atomic64_set_cx8)
16543 CFI_STARTPROC
16544
16545@@ -52,6 +60,19 @@ ENTRY(atomic64_set_cx8)
16546 CFI_ENDPROC
16547 ENDPROC(atomic64_set_cx8)
16548
16549+ENTRY(atomic64_set_unchecked_cx8)
16550+ CFI_STARTPROC
16551+
16552+1:
16553+/* we don't need LOCK_PREFIX since aligned 64-bit writes
16554+ * are atomic on 586 and newer */
16555+ cmpxchg8b (%esi)
16556+ jne 1b
16557+
16558+ ret
16559+ CFI_ENDPROC
16560+ENDPROC(atomic64_set_unchecked_cx8)
16561+
16562 ENTRY(atomic64_xchg_cx8)
16563 CFI_STARTPROC
16564
16565@@ -66,8 +87,8 @@ ENTRY(atomic64_xchg_cx8)
16566 CFI_ENDPROC
16567 ENDPROC(atomic64_xchg_cx8)
16568
16569-.macro addsub_return func ins insc
16570-ENTRY(atomic64_\func\()_return_cx8)
16571+.macro addsub_return func ins insc unchecked=""
16572+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16573 CFI_STARTPROC
16574 SAVE ebp
16575 SAVE ebx
16576@@ -84,27 +105,43 @@ ENTRY(atomic64_\func\()_return_cx8)
16577 movl %edx, %ecx
16578 \ins\()l %esi, %ebx
16579 \insc\()l %edi, %ecx
16580+
16581+.ifb \unchecked
16582+#ifdef CONFIG_PAX_REFCOUNT
16583+ into
16584+2:
16585+ _ASM_EXTABLE(2b, 3f)
16586+#endif
16587+.endif
16588+
16589 LOCK_PREFIX
16590 cmpxchg8b (%ebp)
16591 jne 1b
16592-
16593-10:
16594 movl %ebx, %eax
16595 movl %ecx, %edx
16596+
16597+.ifb \unchecked
16598+#ifdef CONFIG_PAX_REFCOUNT
16599+3:
16600+#endif
16601+.endif
16602+
16603 RESTORE edi
16604 RESTORE esi
16605 RESTORE ebx
16606 RESTORE ebp
16607 ret
16608 CFI_ENDPROC
16609-ENDPROC(atomic64_\func\()_return_cx8)
16610+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16611 .endm
16612
16613 addsub_return add add adc
16614 addsub_return sub sub sbb
16615+addsub_return add add adc _unchecked
16616+addsub_return sub sub sbb _unchecked
16617
16618-.macro incdec_return func ins insc
16619-ENTRY(atomic64_\func\()_return_cx8)
16620+.macro incdec_return func ins insc unchecked
16621+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16622 CFI_STARTPROC
16623 SAVE ebx
16624
16625@@ -114,21 +151,38 @@ ENTRY(atomic64_\func\()_return_cx8)
16626 movl %edx, %ecx
16627 \ins\()l $1, %ebx
16628 \insc\()l $0, %ecx
16629+
16630+.ifb \unchecked
16631+#ifdef CONFIG_PAX_REFCOUNT
16632+ into
16633+2:
16634+ _ASM_EXTABLE(2b, 3f)
16635+#endif
16636+.endif
16637+
16638 LOCK_PREFIX
16639 cmpxchg8b (%esi)
16640 jne 1b
16641
16642-10:
16643 movl %ebx, %eax
16644 movl %ecx, %edx
16645+
16646+.ifb \unchecked
16647+#ifdef CONFIG_PAX_REFCOUNT
16648+3:
16649+#endif
16650+.endif
16651+
16652 RESTORE ebx
16653 ret
16654 CFI_ENDPROC
16655-ENDPROC(atomic64_\func\()_return_cx8)
16656+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16657 .endm
16658
16659 incdec_return inc add adc
16660 incdec_return dec sub sbb
16661+incdec_return inc add adc _unchecked
16662+incdec_return dec sub sbb _unchecked
16663
16664 ENTRY(atomic64_dec_if_positive_cx8)
16665 CFI_STARTPROC
16666@@ -140,6 +194,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
16667 movl %edx, %ecx
16668 subl $1, %ebx
16669 sbb $0, %ecx
16670+
16671+#ifdef CONFIG_PAX_REFCOUNT
16672+ into
16673+1234:
16674+ _ASM_EXTABLE(1234b, 2f)
16675+#endif
16676+
16677 js 2f
16678 LOCK_PREFIX
16679 cmpxchg8b (%esi)
16680@@ -174,6 +235,13 @@ ENTRY(atomic64_add_unless_cx8)
16681 movl %edx, %ecx
16682 addl %esi, %ebx
16683 adcl %edi, %ecx
16684+
16685+#ifdef CONFIG_PAX_REFCOUNT
16686+ into
16687+1234:
16688+ _ASM_EXTABLE(1234b, 3f)
16689+#endif
16690+
16691 LOCK_PREFIX
16692 cmpxchg8b (%ebp)
16693 jne 1b
16694@@ -206,6 +274,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
16695 movl %edx, %ecx
16696 addl $1, %ebx
16697 adcl $0, %ecx
16698+
16699+#ifdef CONFIG_PAX_REFCOUNT
16700+ into
16701+1234:
16702+ _ASM_EXTABLE(1234b, 3f)
16703+#endif
16704+
16705 LOCK_PREFIX
16706 cmpxchg8b (%esi)
16707 jne 1b
16708diff -urNp linux-3.0.4/arch/x86/lib/checksum_32.S linux-3.0.4/arch/x86/lib/checksum_32.S
16709--- linux-3.0.4/arch/x86/lib/checksum_32.S 2011-07-21 22:17:23.000000000 -0400
16710+++ linux-3.0.4/arch/x86/lib/checksum_32.S 2011-08-23 21:47:55.000000000 -0400
16711@@ -28,7 +28,8 @@
16712 #include <linux/linkage.h>
16713 #include <asm/dwarf2.h>
16714 #include <asm/errno.h>
16715-
16716+#include <asm/segment.h>
16717+
16718 /*
16719 * computes a partial checksum, e.g. for TCP/UDP fragments
16720 */
16721@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (
16722
16723 #define ARGBASE 16
16724 #define FP 12
16725-
16726-ENTRY(csum_partial_copy_generic)
16727+
16728+ENTRY(csum_partial_copy_generic_to_user)
16729 CFI_STARTPROC
16730+
16731+#ifdef CONFIG_PAX_MEMORY_UDEREF
16732+ pushl_cfi %gs
16733+ popl_cfi %es
16734+ jmp csum_partial_copy_generic
16735+#endif
16736+
16737+ENTRY(csum_partial_copy_generic_from_user)
16738+
16739+#ifdef CONFIG_PAX_MEMORY_UDEREF
16740+ pushl_cfi %gs
16741+ popl_cfi %ds
16742+#endif
16743+
16744+ENTRY(csum_partial_copy_generic)
16745 subl $4,%esp
16746 CFI_ADJUST_CFA_OFFSET 4
16747 pushl_cfi %edi
16748@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
16749 jmp 4f
16750 SRC(1: movw (%esi), %bx )
16751 addl $2, %esi
16752-DST( movw %bx, (%edi) )
16753+DST( movw %bx, %es:(%edi) )
16754 addl $2, %edi
16755 addw %bx, %ax
16756 adcl $0, %eax
16757@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
16758 SRC(1: movl (%esi), %ebx )
16759 SRC( movl 4(%esi), %edx )
16760 adcl %ebx, %eax
16761-DST( movl %ebx, (%edi) )
16762+DST( movl %ebx, %es:(%edi) )
16763 adcl %edx, %eax
16764-DST( movl %edx, 4(%edi) )
16765+DST( movl %edx, %es:4(%edi) )
16766
16767 SRC( movl 8(%esi), %ebx )
16768 SRC( movl 12(%esi), %edx )
16769 adcl %ebx, %eax
16770-DST( movl %ebx, 8(%edi) )
16771+DST( movl %ebx, %es:8(%edi) )
16772 adcl %edx, %eax
16773-DST( movl %edx, 12(%edi) )
16774+DST( movl %edx, %es:12(%edi) )
16775
16776 SRC( movl 16(%esi), %ebx )
16777 SRC( movl 20(%esi), %edx )
16778 adcl %ebx, %eax
16779-DST( movl %ebx, 16(%edi) )
16780+DST( movl %ebx, %es:16(%edi) )
16781 adcl %edx, %eax
16782-DST( movl %edx, 20(%edi) )
16783+DST( movl %edx, %es:20(%edi) )
16784
16785 SRC( movl 24(%esi), %ebx )
16786 SRC( movl 28(%esi), %edx )
16787 adcl %ebx, %eax
16788-DST( movl %ebx, 24(%edi) )
16789+DST( movl %ebx, %es:24(%edi) )
16790 adcl %edx, %eax
16791-DST( movl %edx, 28(%edi) )
16792+DST( movl %edx, %es:28(%edi) )
16793
16794 lea 32(%esi), %esi
16795 lea 32(%edi), %edi
16796@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
16797 shrl $2, %edx # This clears CF
16798 SRC(3: movl (%esi), %ebx )
16799 adcl %ebx, %eax
16800-DST( movl %ebx, (%edi) )
16801+DST( movl %ebx, %es:(%edi) )
16802 lea 4(%esi), %esi
16803 lea 4(%edi), %edi
16804 dec %edx
16805@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
16806 jb 5f
16807 SRC( movw (%esi), %cx )
16808 leal 2(%esi), %esi
16809-DST( movw %cx, (%edi) )
16810+DST( movw %cx, %es:(%edi) )
16811 leal 2(%edi), %edi
16812 je 6f
16813 shll $16,%ecx
16814 SRC(5: movb (%esi), %cl )
16815-DST( movb %cl, (%edi) )
16816+DST( movb %cl, %es:(%edi) )
16817 6: addl %ecx, %eax
16818 adcl $0, %eax
16819 7:
16820@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
16821
16822 6001:
16823 movl ARGBASE+20(%esp), %ebx # src_err_ptr
16824- movl $-EFAULT, (%ebx)
16825+ movl $-EFAULT, %ss:(%ebx)
16826
16827 # zero the complete destination - computing the rest
16828 # is too much work
16829@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
16830
16831 6002:
16832 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
16833- movl $-EFAULT,(%ebx)
16834+ movl $-EFAULT,%ss:(%ebx)
16835 jmp 5000b
16836
16837 .previous
16838
16839+ pushl_cfi %ss
16840+ popl_cfi %ds
16841+ pushl_cfi %ss
16842+ popl_cfi %es
16843 popl_cfi %ebx
16844 CFI_RESTORE ebx
16845 popl_cfi %esi
16846@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
16847 popl_cfi %ecx # equivalent to addl $4,%esp
16848 ret
16849 CFI_ENDPROC
16850-ENDPROC(csum_partial_copy_generic)
16851+ENDPROC(csum_partial_copy_generic_to_user)
16852
16853 #else
16854
16855 /* Version for PentiumII/PPro */
16856
16857 #define ROUND1(x) \
16858+ nop; nop; nop; \
16859 SRC(movl x(%esi), %ebx ) ; \
16860 addl %ebx, %eax ; \
16861- DST(movl %ebx, x(%edi) ) ;
16862+ DST(movl %ebx, %es:x(%edi)) ;
16863
16864 #define ROUND(x) \
16865+ nop; nop; nop; \
16866 SRC(movl x(%esi), %ebx ) ; \
16867 adcl %ebx, %eax ; \
16868- DST(movl %ebx, x(%edi) ) ;
16869+ DST(movl %ebx, %es:x(%edi)) ;
16870
16871 #define ARGBASE 12
16872-
16873-ENTRY(csum_partial_copy_generic)
16874+
16875+ENTRY(csum_partial_copy_generic_to_user)
16876 CFI_STARTPROC
16877+
16878+#ifdef CONFIG_PAX_MEMORY_UDEREF
16879+ pushl_cfi %gs
16880+ popl_cfi %es
16881+ jmp csum_partial_copy_generic
16882+#endif
16883+
16884+ENTRY(csum_partial_copy_generic_from_user)
16885+
16886+#ifdef CONFIG_PAX_MEMORY_UDEREF
16887+ pushl_cfi %gs
16888+ popl_cfi %ds
16889+#endif
16890+
16891+ENTRY(csum_partial_copy_generic)
16892 pushl_cfi %ebx
16893 CFI_REL_OFFSET ebx, 0
16894 pushl_cfi %edi
16895@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
16896 subl %ebx, %edi
16897 lea -1(%esi),%edx
16898 andl $-32,%edx
16899- lea 3f(%ebx,%ebx), %ebx
16900+ lea 3f(%ebx,%ebx,2), %ebx
16901 testl %esi, %esi
16902 jmp *%ebx
16903 1: addl $64,%esi
16904@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
16905 jb 5f
16906 SRC( movw (%esi), %dx )
16907 leal 2(%esi), %esi
16908-DST( movw %dx, (%edi) )
16909+DST( movw %dx, %es:(%edi) )
16910 leal 2(%edi), %edi
16911 je 6f
16912 shll $16,%edx
16913 5:
16914 SRC( movb (%esi), %dl )
16915-DST( movb %dl, (%edi) )
16916+DST( movb %dl, %es:(%edi) )
16917 6: addl %edx, %eax
16918 adcl $0, %eax
16919 7:
16920 .section .fixup, "ax"
16921 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
16922- movl $-EFAULT, (%ebx)
16923+ movl $-EFAULT, %ss:(%ebx)
16924 # zero the complete destination (computing the rest is too much work)
16925 movl ARGBASE+8(%esp),%edi # dst
16926 movl ARGBASE+12(%esp),%ecx # len
16927@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
16928 rep; stosb
16929 jmp 7b
16930 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
16931- movl $-EFAULT, (%ebx)
16932+ movl $-EFAULT, %ss:(%ebx)
16933 jmp 7b
16934 .previous
16935
16936+#ifdef CONFIG_PAX_MEMORY_UDEREF
16937+ pushl_cfi %ss
16938+ popl_cfi %ds
16939+ pushl_cfi %ss
16940+ popl_cfi %es
16941+#endif
16942+
16943 popl_cfi %esi
16944 CFI_RESTORE esi
16945 popl_cfi %edi
16946@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
16947 CFI_RESTORE ebx
16948 ret
16949 CFI_ENDPROC
16950-ENDPROC(csum_partial_copy_generic)
16951+ENDPROC(csum_partial_copy_generic_to_user)
16952
16953 #undef ROUND
16954 #undef ROUND1
16955diff -urNp linux-3.0.4/arch/x86/lib/clear_page_64.S linux-3.0.4/arch/x86/lib/clear_page_64.S
16956--- linux-3.0.4/arch/x86/lib/clear_page_64.S 2011-07-21 22:17:23.000000000 -0400
16957+++ linux-3.0.4/arch/x86/lib/clear_page_64.S 2011-08-23 21:47:55.000000000 -0400
16958@@ -58,7 +58,7 @@ ENDPROC(clear_page)
16959
16960 #include <asm/cpufeature.h>
16961
16962- .section .altinstr_replacement,"ax"
16963+ .section .altinstr_replacement,"a"
16964 1: .byte 0xeb /* jmp <disp8> */
16965 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
16966 2: .byte 0xeb /* jmp <disp8> */
16967diff -urNp linux-3.0.4/arch/x86/lib/copy_page_64.S linux-3.0.4/arch/x86/lib/copy_page_64.S
16968--- linux-3.0.4/arch/x86/lib/copy_page_64.S 2011-07-21 22:17:23.000000000 -0400
16969+++ linux-3.0.4/arch/x86/lib/copy_page_64.S 2011-08-23 21:47:55.000000000 -0400
16970@@ -104,7 +104,7 @@ ENDPROC(copy_page)
16971
16972 #include <asm/cpufeature.h>
16973
16974- .section .altinstr_replacement,"ax"
16975+ .section .altinstr_replacement,"a"
16976 1: .byte 0xeb /* jmp <disp8> */
16977 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
16978 2:
16979diff -urNp linux-3.0.4/arch/x86/lib/copy_user_64.S linux-3.0.4/arch/x86/lib/copy_user_64.S
16980--- linux-3.0.4/arch/x86/lib/copy_user_64.S 2011-07-21 22:17:23.000000000 -0400
16981+++ linux-3.0.4/arch/x86/lib/copy_user_64.S 2011-08-23 21:47:55.000000000 -0400
16982@@ -16,6 +16,7 @@
16983 #include <asm/thread_info.h>
16984 #include <asm/cpufeature.h>
16985 #include <asm/alternative-asm.h>
16986+#include <asm/pgtable.h>
16987
16988 /*
16989 * By placing feature2 after feature1 in altinstructions section, we logically
16990@@ -29,7 +30,7 @@
16991 .byte 0xe9 /* 32bit jump */
16992 .long \orig-1f /* by default jump to orig */
16993 1:
16994- .section .altinstr_replacement,"ax"
16995+ .section .altinstr_replacement,"a"
16996 2: .byte 0xe9 /* near jump with 32bit immediate */
16997 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
16998 3: .byte 0xe9 /* near jump with 32bit immediate */
16999@@ -71,41 +72,13 @@
17000 #endif
17001 .endm
17002
17003-/* Standard copy_to_user with segment limit checking */
17004-ENTRY(_copy_to_user)
17005- CFI_STARTPROC
17006- GET_THREAD_INFO(%rax)
17007- movq %rdi,%rcx
17008- addq %rdx,%rcx
17009- jc bad_to_user
17010- cmpq TI_addr_limit(%rax),%rcx
17011- ja bad_to_user
17012- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17013- copy_user_generic_unrolled,copy_user_generic_string, \
17014- copy_user_enhanced_fast_string
17015- CFI_ENDPROC
17016-ENDPROC(_copy_to_user)
17017-
17018-/* Standard copy_from_user with segment limit checking */
17019-ENTRY(_copy_from_user)
17020- CFI_STARTPROC
17021- GET_THREAD_INFO(%rax)
17022- movq %rsi,%rcx
17023- addq %rdx,%rcx
17024- jc bad_from_user
17025- cmpq TI_addr_limit(%rax),%rcx
17026- ja bad_from_user
17027- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17028- copy_user_generic_unrolled,copy_user_generic_string, \
17029- copy_user_enhanced_fast_string
17030- CFI_ENDPROC
17031-ENDPROC(_copy_from_user)
17032-
17033 .section .fixup,"ax"
17034 /* must zero dest */
17035 ENTRY(bad_from_user)
17036 bad_from_user:
17037 CFI_STARTPROC
17038+ testl %edx,%edx
17039+ js bad_to_user
17040 movl %edx,%ecx
17041 xorl %eax,%eax
17042 rep
17043diff -urNp linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S
17044--- linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S 2011-07-21 22:17:23.000000000 -0400
17045+++ linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S 2011-08-23 21:47:55.000000000 -0400
17046@@ -14,6 +14,7 @@
17047 #include <asm/current.h>
17048 #include <asm/asm-offsets.h>
17049 #include <asm/thread_info.h>
17050+#include <asm/pgtable.h>
17051
17052 .macro ALIGN_DESTINATION
17053 #ifdef FIX_ALIGNMENT
17054@@ -50,6 +51,15 @@
17055 */
17056 ENTRY(__copy_user_nocache)
17057 CFI_STARTPROC
17058+
17059+#ifdef CONFIG_PAX_MEMORY_UDEREF
17060+ mov $PAX_USER_SHADOW_BASE,%rcx
17061+ cmp %rcx,%rsi
17062+ jae 1f
17063+ add %rcx,%rsi
17064+1:
17065+#endif
17066+
17067 cmpl $8,%edx
17068 jb 20f /* less then 8 bytes, go to byte copy loop */
17069 ALIGN_DESTINATION
17070diff -urNp linux-3.0.4/arch/x86/lib/csum-wrappers_64.c linux-3.0.4/arch/x86/lib/csum-wrappers_64.c
17071--- linux-3.0.4/arch/x86/lib/csum-wrappers_64.c 2011-07-21 22:17:23.000000000 -0400
17072+++ linux-3.0.4/arch/x86/lib/csum-wrappers_64.c 2011-08-23 21:47:55.000000000 -0400
17073@@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
17074 len -= 2;
17075 }
17076 }
17077+
17078+#ifdef CONFIG_PAX_MEMORY_UDEREF
17079+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
17080+ src += PAX_USER_SHADOW_BASE;
17081+#endif
17082+
17083 isum = csum_partial_copy_generic((__force const void *)src,
17084 dst, len, isum, errp, NULL);
17085 if (unlikely(*errp))
17086@@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
17087 }
17088
17089 *errp = 0;
17090+
17091+#ifdef CONFIG_PAX_MEMORY_UDEREF
17092+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
17093+ dst += PAX_USER_SHADOW_BASE;
17094+#endif
17095+
17096 return csum_partial_copy_generic(src, (void __force *)dst,
17097 len, isum, NULL, errp);
17098 }
17099diff -urNp linux-3.0.4/arch/x86/lib/getuser.S linux-3.0.4/arch/x86/lib/getuser.S
17100--- linux-3.0.4/arch/x86/lib/getuser.S 2011-07-21 22:17:23.000000000 -0400
17101+++ linux-3.0.4/arch/x86/lib/getuser.S 2011-08-23 21:47:55.000000000 -0400
17102@@ -33,14 +33,35 @@
17103 #include <asm/asm-offsets.h>
17104 #include <asm/thread_info.h>
17105 #include <asm/asm.h>
17106+#include <asm/segment.h>
17107+#include <asm/pgtable.h>
17108+
17109+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17110+#define __copyuser_seg gs;
17111+#else
17112+#define __copyuser_seg
17113+#endif
17114
17115 .text
17116 ENTRY(__get_user_1)
17117 CFI_STARTPROC
17118+
17119+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17120 GET_THREAD_INFO(%_ASM_DX)
17121 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17122 jae bad_get_user
17123-1: movzb (%_ASM_AX),%edx
17124+
17125+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17126+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17127+ cmp %_ASM_DX,%_ASM_AX
17128+ jae 1234f
17129+ add %_ASM_DX,%_ASM_AX
17130+1234:
17131+#endif
17132+
17133+#endif
17134+
17135+1: __copyuser_seg movzb (%_ASM_AX),%edx
17136 xor %eax,%eax
17137 ret
17138 CFI_ENDPROC
17139@@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
17140 ENTRY(__get_user_2)
17141 CFI_STARTPROC
17142 add $1,%_ASM_AX
17143+
17144+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17145 jc bad_get_user
17146 GET_THREAD_INFO(%_ASM_DX)
17147 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17148 jae bad_get_user
17149-2: movzwl -1(%_ASM_AX),%edx
17150+
17151+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17152+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17153+ cmp %_ASM_DX,%_ASM_AX
17154+ jae 1234f
17155+ add %_ASM_DX,%_ASM_AX
17156+1234:
17157+#endif
17158+
17159+#endif
17160+
17161+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
17162 xor %eax,%eax
17163 ret
17164 CFI_ENDPROC
17165@@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
17166 ENTRY(__get_user_4)
17167 CFI_STARTPROC
17168 add $3,%_ASM_AX
17169+
17170+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17171 jc bad_get_user
17172 GET_THREAD_INFO(%_ASM_DX)
17173 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17174 jae bad_get_user
17175-3: mov -3(%_ASM_AX),%edx
17176+
17177+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17178+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17179+ cmp %_ASM_DX,%_ASM_AX
17180+ jae 1234f
17181+ add %_ASM_DX,%_ASM_AX
17182+1234:
17183+#endif
17184+
17185+#endif
17186+
17187+3: __copyuser_seg mov -3(%_ASM_AX),%edx
17188 xor %eax,%eax
17189 ret
17190 CFI_ENDPROC
17191@@ -80,6 +127,15 @@ ENTRY(__get_user_8)
17192 GET_THREAD_INFO(%_ASM_DX)
17193 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17194 jae bad_get_user
17195+
17196+#ifdef CONFIG_PAX_MEMORY_UDEREF
17197+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17198+ cmp %_ASM_DX,%_ASM_AX
17199+ jae 1234f
17200+ add %_ASM_DX,%_ASM_AX
17201+1234:
17202+#endif
17203+
17204 4: movq -7(%_ASM_AX),%_ASM_DX
17205 xor %eax,%eax
17206 ret
17207diff -urNp linux-3.0.4/arch/x86/lib/insn.c linux-3.0.4/arch/x86/lib/insn.c
17208--- linux-3.0.4/arch/x86/lib/insn.c 2011-07-21 22:17:23.000000000 -0400
17209+++ linux-3.0.4/arch/x86/lib/insn.c 2011-08-23 21:47:55.000000000 -0400
17210@@ -21,6 +21,11 @@
17211 #include <linux/string.h>
17212 #include <asm/inat.h>
17213 #include <asm/insn.h>
17214+#ifdef __KERNEL__
17215+#include <asm/pgtable_types.h>
17216+#else
17217+#define ktla_ktva(addr) addr
17218+#endif
17219
17220 #define get_next(t, insn) \
17221 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
17222@@ -40,8 +45,8 @@
17223 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
17224 {
17225 memset(insn, 0, sizeof(*insn));
17226- insn->kaddr = kaddr;
17227- insn->next_byte = kaddr;
17228+ insn->kaddr = ktla_ktva(kaddr);
17229+ insn->next_byte = ktla_ktva(kaddr);
17230 insn->x86_64 = x86_64 ? 1 : 0;
17231 insn->opnd_bytes = 4;
17232 if (x86_64)
17233diff -urNp linux-3.0.4/arch/x86/lib/mmx_32.c linux-3.0.4/arch/x86/lib/mmx_32.c
17234--- linux-3.0.4/arch/x86/lib/mmx_32.c 2011-07-21 22:17:23.000000000 -0400
17235+++ linux-3.0.4/arch/x86/lib/mmx_32.c 2011-08-23 21:47:55.000000000 -0400
17236@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
17237 {
17238 void *p;
17239 int i;
17240+ unsigned long cr0;
17241
17242 if (unlikely(in_interrupt()))
17243 return __memcpy(to, from, len);
17244@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
17245 kernel_fpu_begin();
17246
17247 __asm__ __volatile__ (
17248- "1: prefetch (%0)\n" /* This set is 28 bytes */
17249- " prefetch 64(%0)\n"
17250- " prefetch 128(%0)\n"
17251- " prefetch 192(%0)\n"
17252- " prefetch 256(%0)\n"
17253+ "1: prefetch (%1)\n" /* This set is 28 bytes */
17254+ " prefetch 64(%1)\n"
17255+ " prefetch 128(%1)\n"
17256+ " prefetch 192(%1)\n"
17257+ " prefetch 256(%1)\n"
17258 "2: \n"
17259 ".section .fixup, \"ax\"\n"
17260- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17261+ "3: \n"
17262+
17263+#ifdef CONFIG_PAX_KERNEXEC
17264+ " movl %%cr0, %0\n"
17265+ " movl %0, %%eax\n"
17266+ " andl $0xFFFEFFFF, %%eax\n"
17267+ " movl %%eax, %%cr0\n"
17268+#endif
17269+
17270+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17271+
17272+#ifdef CONFIG_PAX_KERNEXEC
17273+ " movl %0, %%cr0\n"
17274+#endif
17275+
17276 " jmp 2b\n"
17277 ".previous\n"
17278 _ASM_EXTABLE(1b, 3b)
17279- : : "r" (from));
17280+ : "=&r" (cr0) : "r" (from) : "ax");
17281
17282 for ( ; i > 5; i--) {
17283 __asm__ __volatile__ (
17284- "1: prefetch 320(%0)\n"
17285- "2: movq (%0), %%mm0\n"
17286- " movq 8(%0), %%mm1\n"
17287- " movq 16(%0), %%mm2\n"
17288- " movq 24(%0), %%mm3\n"
17289- " movq %%mm0, (%1)\n"
17290- " movq %%mm1, 8(%1)\n"
17291- " movq %%mm2, 16(%1)\n"
17292- " movq %%mm3, 24(%1)\n"
17293- " movq 32(%0), %%mm0\n"
17294- " movq 40(%0), %%mm1\n"
17295- " movq 48(%0), %%mm2\n"
17296- " movq 56(%0), %%mm3\n"
17297- " movq %%mm0, 32(%1)\n"
17298- " movq %%mm1, 40(%1)\n"
17299- " movq %%mm2, 48(%1)\n"
17300- " movq %%mm3, 56(%1)\n"
17301+ "1: prefetch 320(%1)\n"
17302+ "2: movq (%1), %%mm0\n"
17303+ " movq 8(%1), %%mm1\n"
17304+ " movq 16(%1), %%mm2\n"
17305+ " movq 24(%1), %%mm3\n"
17306+ " movq %%mm0, (%2)\n"
17307+ " movq %%mm1, 8(%2)\n"
17308+ " movq %%mm2, 16(%2)\n"
17309+ " movq %%mm3, 24(%2)\n"
17310+ " movq 32(%1), %%mm0\n"
17311+ " movq 40(%1), %%mm1\n"
17312+ " movq 48(%1), %%mm2\n"
17313+ " movq 56(%1), %%mm3\n"
17314+ " movq %%mm0, 32(%2)\n"
17315+ " movq %%mm1, 40(%2)\n"
17316+ " movq %%mm2, 48(%2)\n"
17317+ " movq %%mm3, 56(%2)\n"
17318 ".section .fixup, \"ax\"\n"
17319- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17320+ "3:\n"
17321+
17322+#ifdef CONFIG_PAX_KERNEXEC
17323+ " movl %%cr0, %0\n"
17324+ " movl %0, %%eax\n"
17325+ " andl $0xFFFEFFFF, %%eax\n"
17326+ " movl %%eax, %%cr0\n"
17327+#endif
17328+
17329+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17330+
17331+#ifdef CONFIG_PAX_KERNEXEC
17332+ " movl %0, %%cr0\n"
17333+#endif
17334+
17335 " jmp 2b\n"
17336 ".previous\n"
17337 _ASM_EXTABLE(1b, 3b)
17338- : : "r" (from), "r" (to) : "memory");
17339+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17340
17341 from += 64;
17342 to += 64;
17343@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
17344 static void fast_copy_page(void *to, void *from)
17345 {
17346 int i;
17347+ unsigned long cr0;
17348
17349 kernel_fpu_begin();
17350
17351@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
17352 * but that is for later. -AV
17353 */
17354 __asm__ __volatile__(
17355- "1: prefetch (%0)\n"
17356- " prefetch 64(%0)\n"
17357- " prefetch 128(%0)\n"
17358- " prefetch 192(%0)\n"
17359- " prefetch 256(%0)\n"
17360+ "1: prefetch (%1)\n"
17361+ " prefetch 64(%1)\n"
17362+ " prefetch 128(%1)\n"
17363+ " prefetch 192(%1)\n"
17364+ " prefetch 256(%1)\n"
17365 "2: \n"
17366 ".section .fixup, \"ax\"\n"
17367- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17368+ "3: \n"
17369+
17370+#ifdef CONFIG_PAX_KERNEXEC
17371+ " movl %%cr0, %0\n"
17372+ " movl %0, %%eax\n"
17373+ " andl $0xFFFEFFFF, %%eax\n"
17374+ " movl %%eax, %%cr0\n"
17375+#endif
17376+
17377+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17378+
17379+#ifdef CONFIG_PAX_KERNEXEC
17380+ " movl %0, %%cr0\n"
17381+#endif
17382+
17383 " jmp 2b\n"
17384 ".previous\n"
17385- _ASM_EXTABLE(1b, 3b) : : "r" (from));
17386+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17387
17388 for (i = 0; i < (4096-320)/64; i++) {
17389 __asm__ __volatile__ (
17390- "1: prefetch 320(%0)\n"
17391- "2: movq (%0), %%mm0\n"
17392- " movntq %%mm0, (%1)\n"
17393- " movq 8(%0), %%mm1\n"
17394- " movntq %%mm1, 8(%1)\n"
17395- " movq 16(%0), %%mm2\n"
17396- " movntq %%mm2, 16(%1)\n"
17397- " movq 24(%0), %%mm3\n"
17398- " movntq %%mm3, 24(%1)\n"
17399- " movq 32(%0), %%mm4\n"
17400- " movntq %%mm4, 32(%1)\n"
17401- " movq 40(%0), %%mm5\n"
17402- " movntq %%mm5, 40(%1)\n"
17403- " movq 48(%0), %%mm6\n"
17404- " movntq %%mm6, 48(%1)\n"
17405- " movq 56(%0), %%mm7\n"
17406- " movntq %%mm7, 56(%1)\n"
17407+ "1: prefetch 320(%1)\n"
17408+ "2: movq (%1), %%mm0\n"
17409+ " movntq %%mm0, (%2)\n"
17410+ " movq 8(%1), %%mm1\n"
17411+ " movntq %%mm1, 8(%2)\n"
17412+ " movq 16(%1), %%mm2\n"
17413+ " movntq %%mm2, 16(%2)\n"
17414+ " movq 24(%1), %%mm3\n"
17415+ " movntq %%mm3, 24(%2)\n"
17416+ " movq 32(%1), %%mm4\n"
17417+ " movntq %%mm4, 32(%2)\n"
17418+ " movq 40(%1), %%mm5\n"
17419+ " movntq %%mm5, 40(%2)\n"
17420+ " movq 48(%1), %%mm6\n"
17421+ " movntq %%mm6, 48(%2)\n"
17422+ " movq 56(%1), %%mm7\n"
17423+ " movntq %%mm7, 56(%2)\n"
17424 ".section .fixup, \"ax\"\n"
17425- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17426+ "3:\n"
17427+
17428+#ifdef CONFIG_PAX_KERNEXEC
17429+ " movl %%cr0, %0\n"
17430+ " movl %0, %%eax\n"
17431+ " andl $0xFFFEFFFF, %%eax\n"
17432+ " movl %%eax, %%cr0\n"
17433+#endif
17434+
17435+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17436+
17437+#ifdef CONFIG_PAX_KERNEXEC
17438+ " movl %0, %%cr0\n"
17439+#endif
17440+
17441 " jmp 2b\n"
17442 ".previous\n"
17443- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
17444+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17445
17446 from += 64;
17447 to += 64;
17448@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
17449 static void fast_copy_page(void *to, void *from)
17450 {
17451 int i;
17452+ unsigned long cr0;
17453
17454 kernel_fpu_begin();
17455
17456 __asm__ __volatile__ (
17457- "1: prefetch (%0)\n"
17458- " prefetch 64(%0)\n"
17459- " prefetch 128(%0)\n"
17460- " prefetch 192(%0)\n"
17461- " prefetch 256(%0)\n"
17462+ "1: prefetch (%1)\n"
17463+ " prefetch 64(%1)\n"
17464+ " prefetch 128(%1)\n"
17465+ " prefetch 192(%1)\n"
17466+ " prefetch 256(%1)\n"
17467 "2: \n"
17468 ".section .fixup, \"ax\"\n"
17469- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17470+ "3: \n"
17471+
17472+#ifdef CONFIG_PAX_KERNEXEC
17473+ " movl %%cr0, %0\n"
17474+ " movl %0, %%eax\n"
17475+ " andl $0xFFFEFFFF, %%eax\n"
17476+ " movl %%eax, %%cr0\n"
17477+#endif
17478+
17479+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17480+
17481+#ifdef CONFIG_PAX_KERNEXEC
17482+ " movl %0, %%cr0\n"
17483+#endif
17484+
17485 " jmp 2b\n"
17486 ".previous\n"
17487- _ASM_EXTABLE(1b, 3b) : : "r" (from));
17488+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17489
17490 for (i = 0; i < 4096/64; i++) {
17491 __asm__ __volatile__ (
17492- "1: prefetch 320(%0)\n"
17493- "2: movq (%0), %%mm0\n"
17494- " movq 8(%0), %%mm1\n"
17495- " movq 16(%0), %%mm2\n"
17496- " movq 24(%0), %%mm3\n"
17497- " movq %%mm0, (%1)\n"
17498- " movq %%mm1, 8(%1)\n"
17499- " movq %%mm2, 16(%1)\n"
17500- " movq %%mm3, 24(%1)\n"
17501- " movq 32(%0), %%mm0\n"
17502- " movq 40(%0), %%mm1\n"
17503- " movq 48(%0), %%mm2\n"
17504- " movq 56(%0), %%mm3\n"
17505- " movq %%mm0, 32(%1)\n"
17506- " movq %%mm1, 40(%1)\n"
17507- " movq %%mm2, 48(%1)\n"
17508- " movq %%mm3, 56(%1)\n"
17509+ "1: prefetch 320(%1)\n"
17510+ "2: movq (%1), %%mm0\n"
17511+ " movq 8(%1), %%mm1\n"
17512+ " movq 16(%1), %%mm2\n"
17513+ " movq 24(%1), %%mm3\n"
17514+ " movq %%mm0, (%2)\n"
17515+ " movq %%mm1, 8(%2)\n"
17516+ " movq %%mm2, 16(%2)\n"
17517+ " movq %%mm3, 24(%2)\n"
17518+ " movq 32(%1), %%mm0\n"
17519+ " movq 40(%1), %%mm1\n"
17520+ " movq 48(%1), %%mm2\n"
17521+ " movq 56(%1), %%mm3\n"
17522+ " movq %%mm0, 32(%2)\n"
17523+ " movq %%mm1, 40(%2)\n"
17524+ " movq %%mm2, 48(%2)\n"
17525+ " movq %%mm3, 56(%2)\n"
17526 ".section .fixup, \"ax\"\n"
17527- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17528+ "3:\n"
17529+
17530+#ifdef CONFIG_PAX_KERNEXEC
17531+ " movl %%cr0, %0\n"
17532+ " movl %0, %%eax\n"
17533+ " andl $0xFFFEFFFF, %%eax\n"
17534+ " movl %%eax, %%cr0\n"
17535+#endif
17536+
17537+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17538+
17539+#ifdef CONFIG_PAX_KERNEXEC
17540+ " movl %0, %%cr0\n"
17541+#endif
17542+
17543 " jmp 2b\n"
17544 ".previous\n"
17545 _ASM_EXTABLE(1b, 3b)
17546- : : "r" (from), "r" (to) : "memory");
17547+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17548
17549 from += 64;
17550 to += 64;
17551diff -urNp linux-3.0.4/arch/x86/lib/putuser.S linux-3.0.4/arch/x86/lib/putuser.S
17552--- linux-3.0.4/arch/x86/lib/putuser.S 2011-07-21 22:17:23.000000000 -0400
17553+++ linux-3.0.4/arch/x86/lib/putuser.S 2011-08-23 21:47:55.000000000 -0400
17554@@ -15,7 +15,8 @@
17555 #include <asm/thread_info.h>
17556 #include <asm/errno.h>
17557 #include <asm/asm.h>
17558-
17559+#include <asm/segment.h>
17560+#include <asm/pgtable.h>
17561
17562 /*
17563 * __put_user_X
17564@@ -29,52 +30,119 @@
17565 * as they get called from within inline assembly.
17566 */
17567
17568-#define ENTER CFI_STARTPROC ; \
17569- GET_THREAD_INFO(%_ASM_BX)
17570+#define ENTER CFI_STARTPROC
17571 #define EXIT ret ; \
17572 CFI_ENDPROC
17573
17574+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17575+#define _DEST %_ASM_CX,%_ASM_BX
17576+#else
17577+#define _DEST %_ASM_CX
17578+#endif
17579+
17580+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17581+#define __copyuser_seg gs;
17582+#else
17583+#define __copyuser_seg
17584+#endif
17585+
17586 .text
17587 ENTRY(__put_user_1)
17588 ENTER
17589+
17590+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17591+ GET_THREAD_INFO(%_ASM_BX)
17592 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
17593 jae bad_put_user
17594-1: movb %al,(%_ASM_CX)
17595+
17596+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17597+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17598+ cmp %_ASM_BX,%_ASM_CX
17599+ jb 1234f
17600+ xor %ebx,%ebx
17601+1234:
17602+#endif
17603+
17604+#endif
17605+
17606+1: __copyuser_seg movb %al,(_DEST)
17607 xor %eax,%eax
17608 EXIT
17609 ENDPROC(__put_user_1)
17610
17611 ENTRY(__put_user_2)
17612 ENTER
17613+
17614+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17615+ GET_THREAD_INFO(%_ASM_BX)
17616 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17617 sub $1,%_ASM_BX
17618 cmp %_ASM_BX,%_ASM_CX
17619 jae bad_put_user
17620-2: movw %ax,(%_ASM_CX)
17621+
17622+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17623+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17624+ cmp %_ASM_BX,%_ASM_CX
17625+ jb 1234f
17626+ xor %ebx,%ebx
17627+1234:
17628+#endif
17629+
17630+#endif
17631+
17632+2: __copyuser_seg movw %ax,(_DEST)
17633 xor %eax,%eax
17634 EXIT
17635 ENDPROC(__put_user_2)
17636
17637 ENTRY(__put_user_4)
17638 ENTER
17639+
17640+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17641+ GET_THREAD_INFO(%_ASM_BX)
17642 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17643 sub $3,%_ASM_BX
17644 cmp %_ASM_BX,%_ASM_CX
17645 jae bad_put_user
17646-3: movl %eax,(%_ASM_CX)
17647+
17648+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17649+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17650+ cmp %_ASM_BX,%_ASM_CX
17651+ jb 1234f
17652+ xor %ebx,%ebx
17653+1234:
17654+#endif
17655+
17656+#endif
17657+
17658+3: __copyuser_seg movl %eax,(_DEST)
17659 xor %eax,%eax
17660 EXIT
17661 ENDPROC(__put_user_4)
17662
17663 ENTRY(__put_user_8)
17664 ENTER
17665+
17666+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17667+ GET_THREAD_INFO(%_ASM_BX)
17668 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17669 sub $7,%_ASM_BX
17670 cmp %_ASM_BX,%_ASM_CX
17671 jae bad_put_user
17672-4: mov %_ASM_AX,(%_ASM_CX)
17673+
17674+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17675+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17676+ cmp %_ASM_BX,%_ASM_CX
17677+ jb 1234f
17678+ xor %ebx,%ebx
17679+1234:
17680+#endif
17681+
17682+#endif
17683+
17684+4: __copyuser_seg mov %_ASM_AX,(_DEST)
17685 #ifdef CONFIG_X86_32
17686-5: movl %edx,4(%_ASM_CX)
17687+5: __copyuser_seg movl %edx,4(_DEST)
17688 #endif
17689 xor %eax,%eax
17690 EXIT
17691diff -urNp linux-3.0.4/arch/x86/lib/usercopy_32.c linux-3.0.4/arch/x86/lib/usercopy_32.c
17692--- linux-3.0.4/arch/x86/lib/usercopy_32.c 2011-07-21 22:17:23.000000000 -0400
17693+++ linux-3.0.4/arch/x86/lib/usercopy_32.c 2011-08-23 21:47:55.000000000 -0400
17694@@ -43,7 +43,7 @@ do { \
17695 __asm__ __volatile__( \
17696 " testl %1,%1\n" \
17697 " jz 2f\n" \
17698- "0: lodsb\n" \
17699+ "0: "__copyuser_seg"lodsb\n" \
17700 " stosb\n" \
17701 " testb %%al,%%al\n" \
17702 " jz 1f\n" \
17703@@ -128,10 +128,12 @@ do { \
17704 int __d0; \
17705 might_fault(); \
17706 __asm__ __volatile__( \
17707+ __COPYUSER_SET_ES \
17708 "0: rep; stosl\n" \
17709 " movl %2,%0\n" \
17710 "1: rep; stosb\n" \
17711 "2:\n" \
17712+ __COPYUSER_RESTORE_ES \
17713 ".section .fixup,\"ax\"\n" \
17714 "3: lea 0(%2,%0,4),%0\n" \
17715 " jmp 2b\n" \
17716@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
17717 might_fault();
17718
17719 __asm__ __volatile__(
17720+ __COPYUSER_SET_ES
17721 " testl %0, %0\n"
17722 " jz 3f\n"
17723 " andl %0,%%ecx\n"
17724@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
17725 " subl %%ecx,%0\n"
17726 " addl %0,%%eax\n"
17727 "1:\n"
17728+ __COPYUSER_RESTORE_ES
17729 ".section .fixup,\"ax\"\n"
17730 "2: xorl %%eax,%%eax\n"
17731 " jmp 1b\n"
17732@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
17733
17734 #ifdef CONFIG_X86_INTEL_USERCOPY
17735 static unsigned long
17736-__copy_user_intel(void __user *to, const void *from, unsigned long size)
17737+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
17738 {
17739 int d0, d1;
17740 __asm__ __volatile__(
17741@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
17742 " .align 2,0x90\n"
17743 "3: movl 0(%4), %%eax\n"
17744 "4: movl 4(%4), %%edx\n"
17745- "5: movl %%eax, 0(%3)\n"
17746- "6: movl %%edx, 4(%3)\n"
17747+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
17748+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
17749 "7: movl 8(%4), %%eax\n"
17750 "8: movl 12(%4),%%edx\n"
17751- "9: movl %%eax, 8(%3)\n"
17752- "10: movl %%edx, 12(%3)\n"
17753+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
17754+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
17755 "11: movl 16(%4), %%eax\n"
17756 "12: movl 20(%4), %%edx\n"
17757- "13: movl %%eax, 16(%3)\n"
17758- "14: movl %%edx, 20(%3)\n"
17759+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
17760+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
17761 "15: movl 24(%4), %%eax\n"
17762 "16: movl 28(%4), %%edx\n"
17763- "17: movl %%eax, 24(%3)\n"
17764- "18: movl %%edx, 28(%3)\n"
17765+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
17766+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
17767 "19: movl 32(%4), %%eax\n"
17768 "20: movl 36(%4), %%edx\n"
17769- "21: movl %%eax, 32(%3)\n"
17770- "22: movl %%edx, 36(%3)\n"
17771+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
17772+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
17773 "23: movl 40(%4), %%eax\n"
17774 "24: movl 44(%4), %%edx\n"
17775- "25: movl %%eax, 40(%3)\n"
17776- "26: movl %%edx, 44(%3)\n"
17777+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
17778+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
17779 "27: movl 48(%4), %%eax\n"
17780 "28: movl 52(%4), %%edx\n"
17781- "29: movl %%eax, 48(%3)\n"
17782- "30: movl %%edx, 52(%3)\n"
17783+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
17784+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
17785 "31: movl 56(%4), %%eax\n"
17786 "32: movl 60(%4), %%edx\n"
17787- "33: movl %%eax, 56(%3)\n"
17788- "34: movl %%edx, 60(%3)\n"
17789+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
17790+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
17791 " addl $-64, %0\n"
17792 " addl $64, %4\n"
17793 " addl $64, %3\n"
17794@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
17795 " shrl $2, %0\n"
17796 " andl $3, %%eax\n"
17797 " cld\n"
17798+ __COPYUSER_SET_ES
17799 "99: rep; movsl\n"
17800 "36: movl %%eax, %0\n"
17801 "37: rep; movsb\n"
17802 "100:\n"
17803+ __COPYUSER_RESTORE_ES
17804+ ".section .fixup,\"ax\"\n"
17805+ "101: lea 0(%%eax,%0,4),%0\n"
17806+ " jmp 100b\n"
17807+ ".previous\n"
17808+ ".section __ex_table,\"a\"\n"
17809+ " .align 4\n"
17810+ " .long 1b,100b\n"
17811+ " .long 2b,100b\n"
17812+ " .long 3b,100b\n"
17813+ " .long 4b,100b\n"
17814+ " .long 5b,100b\n"
17815+ " .long 6b,100b\n"
17816+ " .long 7b,100b\n"
17817+ " .long 8b,100b\n"
17818+ " .long 9b,100b\n"
17819+ " .long 10b,100b\n"
17820+ " .long 11b,100b\n"
17821+ " .long 12b,100b\n"
17822+ " .long 13b,100b\n"
17823+ " .long 14b,100b\n"
17824+ " .long 15b,100b\n"
17825+ " .long 16b,100b\n"
17826+ " .long 17b,100b\n"
17827+ " .long 18b,100b\n"
17828+ " .long 19b,100b\n"
17829+ " .long 20b,100b\n"
17830+ " .long 21b,100b\n"
17831+ " .long 22b,100b\n"
17832+ " .long 23b,100b\n"
17833+ " .long 24b,100b\n"
17834+ " .long 25b,100b\n"
17835+ " .long 26b,100b\n"
17836+ " .long 27b,100b\n"
17837+ " .long 28b,100b\n"
17838+ " .long 29b,100b\n"
17839+ " .long 30b,100b\n"
17840+ " .long 31b,100b\n"
17841+ " .long 32b,100b\n"
17842+ " .long 33b,100b\n"
17843+ " .long 34b,100b\n"
17844+ " .long 35b,100b\n"
17845+ " .long 36b,100b\n"
17846+ " .long 37b,100b\n"
17847+ " .long 99b,101b\n"
17848+ ".previous"
17849+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
17850+ : "1"(to), "2"(from), "0"(size)
17851+ : "eax", "edx", "memory");
17852+ return size;
17853+}
17854+
17855+static unsigned long
17856+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
17857+{
17858+ int d0, d1;
17859+ __asm__ __volatile__(
17860+ " .align 2,0x90\n"
17861+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
17862+ " cmpl $67, %0\n"
17863+ " jbe 3f\n"
17864+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
17865+ " .align 2,0x90\n"
17866+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
17867+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
17868+ "5: movl %%eax, 0(%3)\n"
17869+ "6: movl %%edx, 4(%3)\n"
17870+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
17871+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
17872+ "9: movl %%eax, 8(%3)\n"
17873+ "10: movl %%edx, 12(%3)\n"
17874+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
17875+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
17876+ "13: movl %%eax, 16(%3)\n"
17877+ "14: movl %%edx, 20(%3)\n"
17878+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
17879+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
17880+ "17: movl %%eax, 24(%3)\n"
17881+ "18: movl %%edx, 28(%3)\n"
17882+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
17883+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
17884+ "21: movl %%eax, 32(%3)\n"
17885+ "22: movl %%edx, 36(%3)\n"
17886+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
17887+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
17888+ "25: movl %%eax, 40(%3)\n"
17889+ "26: movl %%edx, 44(%3)\n"
17890+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
17891+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
17892+ "29: movl %%eax, 48(%3)\n"
17893+ "30: movl %%edx, 52(%3)\n"
17894+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
17895+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
17896+ "33: movl %%eax, 56(%3)\n"
17897+ "34: movl %%edx, 60(%3)\n"
17898+ " addl $-64, %0\n"
17899+ " addl $64, %4\n"
17900+ " addl $64, %3\n"
17901+ " cmpl $63, %0\n"
17902+ " ja 1b\n"
17903+ "35: movl %0, %%eax\n"
17904+ " shrl $2, %0\n"
17905+ " andl $3, %%eax\n"
17906+ " cld\n"
17907+ "99: rep; "__copyuser_seg" movsl\n"
17908+ "36: movl %%eax, %0\n"
17909+ "37: rep; "__copyuser_seg" movsb\n"
17910+ "100:\n"
17911 ".section .fixup,\"ax\"\n"
17912 "101: lea 0(%%eax,%0,4),%0\n"
17913 " jmp 100b\n"
17914@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
17915 int d0, d1;
17916 __asm__ __volatile__(
17917 " .align 2,0x90\n"
17918- "0: movl 32(%4), %%eax\n"
17919+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
17920 " cmpl $67, %0\n"
17921 " jbe 2f\n"
17922- "1: movl 64(%4), %%eax\n"
17923+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
17924 " .align 2,0x90\n"
17925- "2: movl 0(%4), %%eax\n"
17926- "21: movl 4(%4), %%edx\n"
17927+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
17928+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
17929 " movl %%eax, 0(%3)\n"
17930 " movl %%edx, 4(%3)\n"
17931- "3: movl 8(%4), %%eax\n"
17932- "31: movl 12(%4),%%edx\n"
17933+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
17934+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
17935 " movl %%eax, 8(%3)\n"
17936 " movl %%edx, 12(%3)\n"
17937- "4: movl 16(%4), %%eax\n"
17938- "41: movl 20(%4), %%edx\n"
17939+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
17940+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
17941 " movl %%eax, 16(%3)\n"
17942 " movl %%edx, 20(%3)\n"
17943- "10: movl 24(%4), %%eax\n"
17944- "51: movl 28(%4), %%edx\n"
17945+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
17946+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
17947 " movl %%eax, 24(%3)\n"
17948 " movl %%edx, 28(%3)\n"
17949- "11: movl 32(%4), %%eax\n"
17950- "61: movl 36(%4), %%edx\n"
17951+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
17952+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
17953 " movl %%eax, 32(%3)\n"
17954 " movl %%edx, 36(%3)\n"
17955- "12: movl 40(%4), %%eax\n"
17956- "71: movl 44(%4), %%edx\n"
17957+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
17958+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
17959 " movl %%eax, 40(%3)\n"
17960 " movl %%edx, 44(%3)\n"
17961- "13: movl 48(%4), %%eax\n"
17962- "81: movl 52(%4), %%edx\n"
17963+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
17964+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
17965 " movl %%eax, 48(%3)\n"
17966 " movl %%edx, 52(%3)\n"
17967- "14: movl 56(%4), %%eax\n"
17968- "91: movl 60(%4), %%edx\n"
17969+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
17970+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
17971 " movl %%eax, 56(%3)\n"
17972 " movl %%edx, 60(%3)\n"
17973 " addl $-64, %0\n"
17974@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
17975 " shrl $2, %0\n"
17976 " andl $3, %%eax\n"
17977 " cld\n"
17978- "6: rep; movsl\n"
17979+ "6: rep; "__copyuser_seg" movsl\n"
17980 " movl %%eax,%0\n"
17981- "7: rep; movsb\n"
17982+ "7: rep; "__copyuser_seg" movsb\n"
17983 "8:\n"
17984 ".section .fixup,\"ax\"\n"
17985 "9: lea 0(%%eax,%0,4),%0\n"
17986@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
17987
17988 __asm__ __volatile__(
17989 " .align 2,0x90\n"
17990- "0: movl 32(%4), %%eax\n"
17991+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
17992 " cmpl $67, %0\n"
17993 " jbe 2f\n"
17994- "1: movl 64(%4), %%eax\n"
17995+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
17996 " .align 2,0x90\n"
17997- "2: movl 0(%4), %%eax\n"
17998- "21: movl 4(%4), %%edx\n"
17999+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18000+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18001 " movnti %%eax, 0(%3)\n"
18002 " movnti %%edx, 4(%3)\n"
18003- "3: movl 8(%4), %%eax\n"
18004- "31: movl 12(%4),%%edx\n"
18005+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18006+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18007 " movnti %%eax, 8(%3)\n"
18008 " movnti %%edx, 12(%3)\n"
18009- "4: movl 16(%4), %%eax\n"
18010- "41: movl 20(%4), %%edx\n"
18011+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18012+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18013 " movnti %%eax, 16(%3)\n"
18014 " movnti %%edx, 20(%3)\n"
18015- "10: movl 24(%4), %%eax\n"
18016- "51: movl 28(%4), %%edx\n"
18017+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18018+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18019 " movnti %%eax, 24(%3)\n"
18020 " movnti %%edx, 28(%3)\n"
18021- "11: movl 32(%4), %%eax\n"
18022- "61: movl 36(%4), %%edx\n"
18023+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18024+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18025 " movnti %%eax, 32(%3)\n"
18026 " movnti %%edx, 36(%3)\n"
18027- "12: movl 40(%4), %%eax\n"
18028- "71: movl 44(%4), %%edx\n"
18029+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18030+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18031 " movnti %%eax, 40(%3)\n"
18032 " movnti %%edx, 44(%3)\n"
18033- "13: movl 48(%4), %%eax\n"
18034- "81: movl 52(%4), %%edx\n"
18035+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18036+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18037 " movnti %%eax, 48(%3)\n"
18038 " movnti %%edx, 52(%3)\n"
18039- "14: movl 56(%4), %%eax\n"
18040- "91: movl 60(%4), %%edx\n"
18041+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18042+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18043 " movnti %%eax, 56(%3)\n"
18044 " movnti %%edx, 60(%3)\n"
18045 " addl $-64, %0\n"
18046@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
18047 " shrl $2, %0\n"
18048 " andl $3, %%eax\n"
18049 " cld\n"
18050- "6: rep; movsl\n"
18051+ "6: rep; "__copyuser_seg" movsl\n"
18052 " movl %%eax,%0\n"
18053- "7: rep; movsb\n"
18054+ "7: rep; "__copyuser_seg" movsb\n"
18055 "8:\n"
18056 ".section .fixup,\"ax\"\n"
18057 "9: lea 0(%%eax,%0,4),%0\n"
18058@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
18059
18060 __asm__ __volatile__(
18061 " .align 2,0x90\n"
18062- "0: movl 32(%4), %%eax\n"
18063+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18064 " cmpl $67, %0\n"
18065 " jbe 2f\n"
18066- "1: movl 64(%4), %%eax\n"
18067+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18068 " .align 2,0x90\n"
18069- "2: movl 0(%4), %%eax\n"
18070- "21: movl 4(%4), %%edx\n"
18071+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18072+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18073 " movnti %%eax, 0(%3)\n"
18074 " movnti %%edx, 4(%3)\n"
18075- "3: movl 8(%4), %%eax\n"
18076- "31: movl 12(%4),%%edx\n"
18077+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18078+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18079 " movnti %%eax, 8(%3)\n"
18080 " movnti %%edx, 12(%3)\n"
18081- "4: movl 16(%4), %%eax\n"
18082- "41: movl 20(%4), %%edx\n"
18083+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18084+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18085 " movnti %%eax, 16(%3)\n"
18086 " movnti %%edx, 20(%3)\n"
18087- "10: movl 24(%4), %%eax\n"
18088- "51: movl 28(%4), %%edx\n"
18089+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18090+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18091 " movnti %%eax, 24(%3)\n"
18092 " movnti %%edx, 28(%3)\n"
18093- "11: movl 32(%4), %%eax\n"
18094- "61: movl 36(%4), %%edx\n"
18095+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18096+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18097 " movnti %%eax, 32(%3)\n"
18098 " movnti %%edx, 36(%3)\n"
18099- "12: movl 40(%4), %%eax\n"
18100- "71: movl 44(%4), %%edx\n"
18101+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18102+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18103 " movnti %%eax, 40(%3)\n"
18104 " movnti %%edx, 44(%3)\n"
18105- "13: movl 48(%4), %%eax\n"
18106- "81: movl 52(%4), %%edx\n"
18107+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18108+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18109 " movnti %%eax, 48(%3)\n"
18110 " movnti %%edx, 52(%3)\n"
18111- "14: movl 56(%4), %%eax\n"
18112- "91: movl 60(%4), %%edx\n"
18113+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18114+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18115 " movnti %%eax, 56(%3)\n"
18116 " movnti %%edx, 60(%3)\n"
18117 " addl $-64, %0\n"
18118@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
18119 " shrl $2, %0\n"
18120 " andl $3, %%eax\n"
18121 " cld\n"
18122- "6: rep; movsl\n"
18123+ "6: rep; "__copyuser_seg" movsl\n"
18124 " movl %%eax,%0\n"
18125- "7: rep; movsb\n"
18126+ "7: rep; "__copyuser_seg" movsb\n"
18127 "8:\n"
18128 ".section .fixup,\"ax\"\n"
18129 "9: lea 0(%%eax,%0,4),%0\n"
18130@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
18131 */
18132 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
18133 unsigned long size);
18134-unsigned long __copy_user_intel(void __user *to, const void *from,
18135+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
18136+ unsigned long size);
18137+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
18138 unsigned long size);
18139 unsigned long __copy_user_zeroing_intel_nocache(void *to,
18140 const void __user *from, unsigned long size);
18141 #endif /* CONFIG_X86_INTEL_USERCOPY */
18142
18143 /* Generic arbitrary sized copy. */
18144-#define __copy_user(to, from, size) \
18145+#define __copy_user(to, from, size, prefix, set, restore) \
18146 do { \
18147 int __d0, __d1, __d2; \
18148 __asm__ __volatile__( \
18149+ set \
18150 " cmp $7,%0\n" \
18151 " jbe 1f\n" \
18152 " movl %1,%0\n" \
18153 " negl %0\n" \
18154 " andl $7,%0\n" \
18155 " subl %0,%3\n" \
18156- "4: rep; movsb\n" \
18157+ "4: rep; "prefix"movsb\n" \
18158 " movl %3,%0\n" \
18159 " shrl $2,%0\n" \
18160 " andl $3,%3\n" \
18161 " .align 2,0x90\n" \
18162- "0: rep; movsl\n" \
18163+ "0: rep; "prefix"movsl\n" \
18164 " movl %3,%0\n" \
18165- "1: rep; movsb\n" \
18166+ "1: rep; "prefix"movsb\n" \
18167 "2:\n" \
18168+ restore \
18169 ".section .fixup,\"ax\"\n" \
18170 "5: addl %3,%0\n" \
18171 " jmp 2b\n" \
18172@@ -682,14 +799,14 @@ do { \
18173 " negl %0\n" \
18174 " andl $7,%0\n" \
18175 " subl %0,%3\n" \
18176- "4: rep; movsb\n" \
18177+ "4: rep; "__copyuser_seg"movsb\n" \
18178 " movl %3,%0\n" \
18179 " shrl $2,%0\n" \
18180 " andl $3,%3\n" \
18181 " .align 2,0x90\n" \
18182- "0: rep; movsl\n" \
18183+ "0: rep; "__copyuser_seg"movsl\n" \
18184 " movl %3,%0\n" \
18185- "1: rep; movsb\n" \
18186+ "1: rep; "__copyuser_seg"movsb\n" \
18187 "2:\n" \
18188 ".section .fixup,\"ax\"\n" \
18189 "5: addl %3,%0\n" \
18190@@ -775,9 +892,9 @@ survive:
18191 }
18192 #endif
18193 if (movsl_is_ok(to, from, n))
18194- __copy_user(to, from, n);
18195+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
18196 else
18197- n = __copy_user_intel(to, from, n);
18198+ n = __generic_copy_to_user_intel(to, from, n);
18199 return n;
18200 }
18201 EXPORT_SYMBOL(__copy_to_user_ll);
18202@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
18203 unsigned long n)
18204 {
18205 if (movsl_is_ok(to, from, n))
18206- __copy_user(to, from, n);
18207+ __copy_user(to, from, n, __copyuser_seg, "", "");
18208 else
18209- n = __copy_user_intel((void __user *)to,
18210- (const void *)from, n);
18211+ n = __generic_copy_from_user_intel(to, from, n);
18212 return n;
18213 }
18214 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
18215@@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocach
18216 if (n > 64 && cpu_has_xmm2)
18217 n = __copy_user_intel_nocache(to, from, n);
18218 else
18219- __copy_user(to, from, n);
18220+ __copy_user(to, from, n, __copyuser_seg, "", "");
18221 #else
18222- __copy_user(to, from, n);
18223+ __copy_user(to, from, n, __copyuser_seg, "", "");
18224 #endif
18225 return n;
18226 }
18227 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
18228
18229-/**
18230- * copy_to_user: - Copy a block of data into user space.
18231- * @to: Destination address, in user space.
18232- * @from: Source address, in kernel space.
18233- * @n: Number of bytes to copy.
18234- *
18235- * Context: User context only. This function may sleep.
18236- *
18237- * Copy data from kernel space to user space.
18238- *
18239- * Returns number of bytes that could not be copied.
18240- * On success, this will be zero.
18241- */
18242-unsigned long
18243-copy_to_user(void __user *to, const void *from, unsigned long n)
18244+void copy_from_user_overflow(void)
18245 {
18246- if (access_ok(VERIFY_WRITE, to, n))
18247- n = __copy_to_user(to, from, n);
18248- return n;
18249+ WARN(1, "Buffer overflow detected!\n");
18250 }
18251-EXPORT_SYMBOL(copy_to_user);
18252+EXPORT_SYMBOL(copy_from_user_overflow);
18253
18254-/**
18255- * copy_from_user: - Copy a block of data from user space.
18256- * @to: Destination address, in kernel space.
18257- * @from: Source address, in user space.
18258- * @n: Number of bytes to copy.
18259- *
18260- * Context: User context only. This function may sleep.
18261- *
18262- * Copy data from user space to kernel space.
18263- *
18264- * Returns number of bytes that could not be copied.
18265- * On success, this will be zero.
18266- *
18267- * If some data could not be copied, this function will pad the copied
18268- * data to the requested size using zero bytes.
18269- */
18270-unsigned long
18271-_copy_from_user(void *to, const void __user *from, unsigned long n)
18272+void copy_to_user_overflow(void)
18273 {
18274- if (access_ok(VERIFY_READ, from, n))
18275- n = __copy_from_user(to, from, n);
18276- else
18277- memset(to, 0, n);
18278- return n;
18279+ WARN(1, "Buffer overflow detected!\n");
18280 }
18281-EXPORT_SYMBOL(_copy_from_user);
18282+EXPORT_SYMBOL(copy_to_user_overflow);
18283
18284-void copy_from_user_overflow(void)
18285+#ifdef CONFIG_PAX_MEMORY_UDEREF
18286+void __set_fs(mm_segment_t x)
18287 {
18288- WARN(1, "Buffer overflow detected!\n");
18289+ switch (x.seg) {
18290+ case 0:
18291+ loadsegment(gs, 0);
18292+ break;
18293+ case TASK_SIZE_MAX:
18294+ loadsegment(gs, __USER_DS);
18295+ break;
18296+ case -1UL:
18297+ loadsegment(gs, __KERNEL_DS);
18298+ break;
18299+ default:
18300+ BUG();
18301+ }
18302+ return;
18303 }
18304-EXPORT_SYMBOL(copy_from_user_overflow);
18305+EXPORT_SYMBOL(__set_fs);
18306+
18307+void set_fs(mm_segment_t x)
18308+{
18309+ current_thread_info()->addr_limit = x;
18310+ __set_fs(x);
18311+}
18312+EXPORT_SYMBOL(set_fs);
18313+#endif
18314diff -urNp linux-3.0.4/arch/x86/lib/usercopy_64.c linux-3.0.4/arch/x86/lib/usercopy_64.c
18315--- linux-3.0.4/arch/x86/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
18316+++ linux-3.0.4/arch/x86/lib/usercopy_64.c 2011-08-23 21:47:55.000000000 -0400
18317@@ -42,6 +42,12 @@ long
18318 __strncpy_from_user(char *dst, const char __user *src, long count)
18319 {
18320 long res;
18321+
18322+#ifdef CONFIG_PAX_MEMORY_UDEREF
18323+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
18324+ src += PAX_USER_SHADOW_BASE;
18325+#endif
18326+
18327 __do_strncpy_from_user(dst, src, count, res);
18328 return res;
18329 }
18330@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
18331 {
18332 long __d0;
18333 might_fault();
18334+
18335+#ifdef CONFIG_PAX_MEMORY_UDEREF
18336+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
18337+ addr += PAX_USER_SHADOW_BASE;
18338+#endif
18339+
18340 /* no memory constraint because it doesn't change any memory gcc knows
18341 about */
18342 asm volatile(
18343@@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
18344
18345 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
18346 {
18347- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18348+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18349+
18350+#ifdef CONFIG_PAX_MEMORY_UDEREF
18351+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
18352+ to += PAX_USER_SHADOW_BASE;
18353+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
18354+ from += PAX_USER_SHADOW_BASE;
18355+#endif
18356+
18357 return copy_user_generic((__force void *)to, (__force void *)from, len);
18358- }
18359- return len;
18360+ }
18361+ return len;
18362 }
18363 EXPORT_SYMBOL(copy_in_user);
18364
18365diff -urNp linux-3.0.4/arch/x86/Makefile linux-3.0.4/arch/x86/Makefile
18366--- linux-3.0.4/arch/x86/Makefile 2011-07-21 22:17:23.000000000 -0400
18367+++ linux-3.0.4/arch/x86/Makefile 2011-08-23 21:48:14.000000000 -0400
18368@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
18369 else
18370 BITS := 64
18371 UTS_MACHINE := x86_64
18372+ biarch := $(call cc-option,-m64)
18373 CHECKFLAGS += -D__x86_64__ -m64
18374
18375 KBUILD_AFLAGS += -m64
18376@@ -195,3 +196,12 @@ define archhelp
18377 echo ' FDARGS="..." arguments for the booted kernel'
18378 echo ' FDINITRD=file initrd for the booted kernel'
18379 endef
18380+
18381+define OLD_LD
18382+
18383+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
18384+*** Please upgrade your binutils to 2.18 or newer
18385+endef
18386+
18387+archprepare:
18388+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
18389diff -urNp linux-3.0.4/arch/x86/mm/extable.c linux-3.0.4/arch/x86/mm/extable.c
18390--- linux-3.0.4/arch/x86/mm/extable.c 2011-07-21 22:17:23.000000000 -0400
18391+++ linux-3.0.4/arch/x86/mm/extable.c 2011-08-23 21:47:55.000000000 -0400
18392@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs
18393 const struct exception_table_entry *fixup;
18394
18395 #ifdef CONFIG_PNPBIOS
18396- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
18397+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
18398 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
18399 extern u32 pnp_bios_is_utter_crap;
18400 pnp_bios_is_utter_crap = 1;
18401diff -urNp linux-3.0.4/arch/x86/mm/fault.c linux-3.0.4/arch/x86/mm/fault.c
18402--- linux-3.0.4/arch/x86/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
18403+++ linux-3.0.4/arch/x86/mm/fault.c 2011-08-23 21:48:14.000000000 -0400
18404@@ -13,10 +13,18 @@
18405 #include <linux/perf_event.h> /* perf_sw_event */
18406 #include <linux/hugetlb.h> /* hstate_index_to_shift */
18407 #include <linux/prefetch.h> /* prefetchw */
18408+#include <linux/unistd.h>
18409+#include <linux/compiler.h>
18410
18411 #include <asm/traps.h> /* dotraplinkage, ... */
18412 #include <asm/pgalloc.h> /* pgd_*(), ... */
18413 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
18414+#include <asm/vsyscall.h>
18415+#include <asm/tlbflush.h>
18416+
18417+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18418+#include <asm/stacktrace.h>
18419+#endif
18420
18421 /*
18422 * Page fault error code bits:
18423@@ -54,7 +62,7 @@ static inline int __kprobes notify_page_
18424 int ret = 0;
18425
18426 /* kprobe_running() needs smp_processor_id() */
18427- if (kprobes_built_in() && !user_mode_vm(regs)) {
18428+ if (kprobes_built_in() && !user_mode(regs)) {
18429 preempt_disable();
18430 if (kprobe_running() && kprobe_fault_handler(regs, 14))
18431 ret = 1;
18432@@ -115,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *re
18433 return !instr_lo || (instr_lo>>1) == 1;
18434 case 0x00:
18435 /* Prefetch instruction is 0x0F0D or 0x0F18 */
18436- if (probe_kernel_address(instr, opcode))
18437+ if (user_mode(regs)) {
18438+ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18439+ return 0;
18440+ } else if (probe_kernel_address(instr, opcode))
18441 return 0;
18442
18443 *prefetch = (instr_lo == 0xF) &&
18444@@ -149,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsign
18445 while (instr < max_instr) {
18446 unsigned char opcode;
18447
18448- if (probe_kernel_address(instr, opcode))
18449+ if (user_mode(regs)) {
18450+ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18451+ break;
18452+ } else if (probe_kernel_address(instr, opcode))
18453 break;
18454
18455 instr++;
18456@@ -180,6 +194,30 @@ force_sig_info_fault(int si_signo, int s
18457 force_sig_info(si_signo, &info, tsk);
18458 }
18459
18460+#ifdef CONFIG_PAX_EMUTRAMP
18461+static int pax_handle_fetch_fault(struct pt_regs *regs);
18462+#endif
18463+
18464+#ifdef CONFIG_PAX_PAGEEXEC
18465+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
18466+{
18467+ pgd_t *pgd;
18468+ pud_t *pud;
18469+ pmd_t *pmd;
18470+
18471+ pgd = pgd_offset(mm, address);
18472+ if (!pgd_present(*pgd))
18473+ return NULL;
18474+ pud = pud_offset(pgd, address);
18475+ if (!pud_present(*pud))
18476+ return NULL;
18477+ pmd = pmd_offset(pud, address);
18478+ if (!pmd_present(*pmd))
18479+ return NULL;
18480+ return pmd;
18481+}
18482+#endif
18483+
18484 DEFINE_SPINLOCK(pgd_lock);
18485 LIST_HEAD(pgd_list);
18486
18487@@ -230,10 +268,22 @@ void vmalloc_sync_all(void)
18488 for (address = VMALLOC_START & PMD_MASK;
18489 address >= TASK_SIZE && address < FIXADDR_TOP;
18490 address += PMD_SIZE) {
18491+
18492+#ifdef CONFIG_PAX_PER_CPU_PGD
18493+ unsigned long cpu;
18494+#else
18495 struct page *page;
18496+#endif
18497
18498 spin_lock(&pgd_lock);
18499+
18500+#ifdef CONFIG_PAX_PER_CPU_PGD
18501+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
18502+ pgd_t *pgd = get_cpu_pgd(cpu);
18503+ pmd_t *ret;
18504+#else
18505 list_for_each_entry(page, &pgd_list, lru) {
18506+ pgd_t *pgd = page_address(page);
18507 spinlock_t *pgt_lock;
18508 pmd_t *ret;
18509
18510@@ -241,8 +291,13 @@ void vmalloc_sync_all(void)
18511 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
18512
18513 spin_lock(pgt_lock);
18514- ret = vmalloc_sync_one(page_address(page), address);
18515+#endif
18516+
18517+ ret = vmalloc_sync_one(pgd, address);
18518+
18519+#ifndef CONFIG_PAX_PER_CPU_PGD
18520 spin_unlock(pgt_lock);
18521+#endif
18522
18523 if (!ret)
18524 break;
18525@@ -276,6 +331,11 @@ static noinline __kprobes int vmalloc_fa
18526 * an interrupt in the middle of a task switch..
18527 */
18528 pgd_paddr = read_cr3();
18529+
18530+#ifdef CONFIG_PAX_PER_CPU_PGD
18531+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
18532+#endif
18533+
18534 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
18535 if (!pmd_k)
18536 return -1;
18537@@ -371,7 +431,14 @@ static noinline __kprobes int vmalloc_fa
18538 * happen within a race in page table update. In the later
18539 * case just flush:
18540 */
18541+
18542+#ifdef CONFIG_PAX_PER_CPU_PGD
18543+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
18544+ pgd = pgd_offset_cpu(smp_processor_id(), address);
18545+#else
18546 pgd = pgd_offset(current->active_mm, address);
18547+#endif
18548+
18549 pgd_ref = pgd_offset_k(address);
18550 if (pgd_none(*pgd_ref))
18551 return -1;
18552@@ -533,7 +600,7 @@ static int is_errata93(struct pt_regs *r
18553 static int is_errata100(struct pt_regs *regs, unsigned long address)
18554 {
18555 #ifdef CONFIG_X86_64
18556- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
18557+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
18558 return 1;
18559 #endif
18560 return 0;
18561@@ -560,7 +627,7 @@ static int is_f00f_bug(struct pt_regs *r
18562 }
18563
18564 static const char nx_warning[] = KERN_CRIT
18565-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
18566+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
18567
18568 static void
18569 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
18570@@ -569,14 +636,25 @@ show_fault_oops(struct pt_regs *regs, un
18571 if (!oops_may_print())
18572 return;
18573
18574- if (error_code & PF_INSTR) {
18575+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
18576 unsigned int level;
18577
18578 pte_t *pte = lookup_address(address, &level);
18579
18580 if (pte && pte_present(*pte) && !pte_exec(*pte))
18581- printk(nx_warning, current_uid());
18582+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
18583+ }
18584+
18585+#ifdef CONFIG_PAX_KERNEXEC
18586+ if (init_mm.start_code <= address && address < init_mm.end_code) {
18587+ if (current->signal->curr_ip)
18588+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
18589+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
18590+ else
18591+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
18592+ current->comm, task_pid_nr(current), current_uid(), current_euid());
18593 }
18594+#endif
18595
18596 printk(KERN_ALERT "BUG: unable to handle kernel ");
18597 if (address < PAGE_SIZE)
18598@@ -702,6 +780,66 @@ __bad_area_nosemaphore(struct pt_regs *r
18599 unsigned long address, int si_code)
18600 {
18601 struct task_struct *tsk = current;
18602+#if defined(CONFIG_X86_64) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18603+ struct mm_struct *mm = tsk->mm;
18604+#endif
18605+
18606+#ifdef CONFIG_X86_64
18607+ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
18608+ if (regs->ip == VSYSCALL_ADDR(__NR_vgettimeofday) ||
18609+ regs->ip == VSYSCALL_ADDR(__NR_vtime) ||
18610+ regs->ip == VSYSCALL_ADDR(__NR_vgetcpu)) {
18611+ regs->ip += mm->context.vdso - PAGE_SIZE - VSYSCALL_START;
18612+ return;
18613+ }
18614+ }
18615+#endif
18616+
18617+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18618+ if (mm && (error_code & PF_USER)) {
18619+ unsigned long ip = regs->ip;
18620+
18621+ if (v8086_mode(regs))
18622+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
18623+
18624+ /*
18625+ * It's possible to have interrupts off here:
18626+ */
18627+ local_irq_enable();
18628+
18629+#ifdef CONFIG_PAX_PAGEEXEC
18630+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
18631+ (((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
18632+
18633+#ifdef CONFIG_PAX_EMUTRAMP
18634+ switch (pax_handle_fetch_fault(regs)) {
18635+ case 2:
18636+ return;
18637+ }
18638+#endif
18639+
18640+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
18641+ do_group_exit(SIGKILL);
18642+ }
18643+#endif
18644+
18645+#ifdef CONFIG_PAX_SEGMEXEC
18646+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
18647+
18648+#ifdef CONFIG_PAX_EMUTRAMP
18649+ switch (pax_handle_fetch_fault(regs)) {
18650+ case 2:
18651+ return;
18652+ }
18653+#endif
18654+
18655+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
18656+ do_group_exit(SIGKILL);
18657+ }
18658+#endif
18659+
18660+ }
18661+#endif
18662
18663 /* User mode accesses just cause a SIGSEGV */
18664 if (error_code & PF_USER) {
18665@@ -871,6 +1009,99 @@ static int spurious_fault_check(unsigned
18666 return 1;
18667 }
18668
18669+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18670+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
18671+{
18672+ pte_t *pte;
18673+ pmd_t *pmd;
18674+ spinlock_t *ptl;
18675+ unsigned char pte_mask;
18676+
18677+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
18678+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
18679+ return 0;
18680+
18681+ /* PaX: it's our fault, let's handle it if we can */
18682+
18683+ /* PaX: take a look at read faults before acquiring any locks */
18684+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
18685+ /* instruction fetch attempt from a protected page in user mode */
18686+ up_read(&mm->mmap_sem);
18687+
18688+#ifdef CONFIG_PAX_EMUTRAMP
18689+ switch (pax_handle_fetch_fault(regs)) {
18690+ case 2:
18691+ return 1;
18692+ }
18693+#endif
18694+
18695+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
18696+ do_group_exit(SIGKILL);
18697+ }
18698+
18699+ pmd = pax_get_pmd(mm, address);
18700+ if (unlikely(!pmd))
18701+ return 0;
18702+
18703+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
18704+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
18705+ pte_unmap_unlock(pte, ptl);
18706+ return 0;
18707+ }
18708+
18709+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
18710+ /* write attempt to a protected page in user mode */
18711+ pte_unmap_unlock(pte, ptl);
18712+ return 0;
18713+ }
18714+
18715+#ifdef CONFIG_SMP
18716+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
18717+#else
18718+ if (likely(address > get_limit(regs->cs)))
18719+#endif
18720+ {
18721+ set_pte(pte, pte_mkread(*pte));
18722+ __flush_tlb_one(address);
18723+ pte_unmap_unlock(pte, ptl);
18724+ up_read(&mm->mmap_sem);
18725+ return 1;
18726+ }
18727+
18728+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
18729+
18730+ /*
18731+ * PaX: fill DTLB with user rights and retry
18732+ */
18733+ __asm__ __volatile__ (
18734+ "orb %2,(%1)\n"
18735+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
18736+/*
18737+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
18738+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
18739+ * page fault when examined during a TLB load attempt. this is true not only
18740+ * for PTEs holding a non-present entry but also present entries that will
18741+ * raise a page fault (such as those set up by PaX, or the copy-on-write
18742+ * mechanism). in effect it means that we do *not* need to flush the TLBs
18743+ * for our target pages since their PTEs are simply not in the TLBs at all.
18744+
18745+ * the best thing in omitting it is that we gain around 15-20% speed in the
18746+ * fast path of the page fault handler and can get rid of tracing since we
18747+ * can no longer flush unintended entries.
18748+ */
18749+ "invlpg (%0)\n"
18750+#endif
18751+ __copyuser_seg"testb $0,(%0)\n"
18752+ "xorb %3,(%1)\n"
18753+ :
18754+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
18755+ : "memory", "cc");
18756+ pte_unmap_unlock(pte, ptl);
18757+ up_read(&mm->mmap_sem);
18758+ return 1;
18759+}
18760+#endif
18761+
18762 /*
18763 * Handle a spurious fault caused by a stale TLB entry.
18764 *
18765@@ -943,6 +1174,9 @@ int show_unhandled_signals = 1;
18766 static inline int
18767 access_error(unsigned long error_code, struct vm_area_struct *vma)
18768 {
18769+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
18770+ return 1;
18771+
18772 if (error_code & PF_WRITE) {
18773 /* write, present and write, not present: */
18774 if (unlikely(!(vma->vm_flags & VM_WRITE)))
18775@@ -976,19 +1210,33 @@ do_page_fault(struct pt_regs *regs, unsi
18776 {
18777 struct vm_area_struct *vma;
18778 struct task_struct *tsk;
18779- unsigned long address;
18780 struct mm_struct *mm;
18781 int fault;
18782 int write = error_code & PF_WRITE;
18783 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
18784 (write ? FAULT_FLAG_WRITE : 0);
18785
18786+ /* Get the faulting address: */
18787+ unsigned long address = read_cr2();
18788+
18789+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18790+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
18791+ if (!search_exception_tables(regs->ip)) {
18792+ bad_area_nosemaphore(regs, error_code, address);
18793+ return;
18794+ }
18795+ if (address < PAX_USER_SHADOW_BASE) {
18796+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
18797+ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
18798+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
18799+ } else
18800+ address -= PAX_USER_SHADOW_BASE;
18801+ }
18802+#endif
18803+
18804 tsk = current;
18805 mm = tsk->mm;
18806
18807- /* Get the faulting address: */
18808- address = read_cr2();
18809-
18810 /*
18811 * Detect and handle instructions that would cause a page fault for
18812 * both a tracked kernel page and a userspace page.
18813@@ -1048,7 +1296,7 @@ do_page_fault(struct pt_regs *regs, unsi
18814 * User-mode registers count as a user access even for any
18815 * potential system fault or CPU buglet:
18816 */
18817- if (user_mode_vm(regs)) {
18818+ if (user_mode(regs)) {
18819 local_irq_enable();
18820 error_code |= PF_USER;
18821 } else {
18822@@ -1103,6 +1351,11 @@ retry:
18823 might_sleep();
18824 }
18825
18826+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18827+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
18828+ return;
18829+#endif
18830+
18831 vma = find_vma(mm, address);
18832 if (unlikely(!vma)) {
18833 bad_area(regs, error_code, address);
18834@@ -1114,18 +1367,24 @@ retry:
18835 bad_area(regs, error_code, address);
18836 return;
18837 }
18838- if (error_code & PF_USER) {
18839- /*
18840- * Accessing the stack below %sp is always a bug.
18841- * The large cushion allows instructions like enter
18842- * and pusha to work. ("enter $65535, $31" pushes
18843- * 32 pointers and then decrements %sp by 65535.)
18844- */
18845- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
18846- bad_area(regs, error_code, address);
18847- return;
18848- }
18849+ /*
18850+ * Accessing the stack below %sp is always a bug.
18851+ * The large cushion allows instructions like enter
18852+ * and pusha to work. ("enter $65535, $31" pushes
18853+ * 32 pointers and then decrements %sp by 65535.)
18854+ */
18855+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
18856+ bad_area(regs, error_code, address);
18857+ return;
18858 }
18859+
18860+#ifdef CONFIG_PAX_SEGMEXEC
18861+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
18862+ bad_area(regs, error_code, address);
18863+ return;
18864+ }
18865+#endif
18866+
18867 if (unlikely(expand_stack(vma, address))) {
18868 bad_area(regs, error_code, address);
18869 return;
18870@@ -1180,3 +1439,199 @@ good_area:
18871
18872 up_read(&mm->mmap_sem);
18873 }
18874+
18875+#ifdef CONFIG_PAX_EMUTRAMP
18876+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
18877+{
18878+ int err;
18879+
18880+ do { /* PaX: gcc trampoline emulation #1 */
18881+ unsigned char mov1, mov2;
18882+ unsigned short jmp;
18883+ unsigned int addr1, addr2;
18884+
18885+#ifdef CONFIG_X86_64
18886+ if ((regs->ip + 11) >> 32)
18887+ break;
18888+#endif
18889+
18890+ err = get_user(mov1, (unsigned char __user *)regs->ip);
18891+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
18892+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
18893+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
18894+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
18895+
18896+ if (err)
18897+ break;
18898+
18899+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
18900+ regs->cx = addr1;
18901+ regs->ax = addr2;
18902+ regs->ip = addr2;
18903+ return 2;
18904+ }
18905+ } while (0);
18906+
18907+ do { /* PaX: gcc trampoline emulation #2 */
18908+ unsigned char mov, jmp;
18909+ unsigned int addr1, addr2;
18910+
18911+#ifdef CONFIG_X86_64
18912+ if ((regs->ip + 9) >> 32)
18913+ break;
18914+#endif
18915+
18916+ err = get_user(mov, (unsigned char __user *)regs->ip);
18917+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
18918+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
18919+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
18920+
18921+ if (err)
18922+ break;
18923+
18924+ if (mov == 0xB9 && jmp == 0xE9) {
18925+ regs->cx = addr1;
18926+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
18927+ return 2;
18928+ }
18929+ } while (0);
18930+
18931+ return 1; /* PaX in action */
18932+}
18933+
18934+#ifdef CONFIG_X86_64
18935+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
18936+{
18937+ int err;
18938+
18939+ do { /* PaX: gcc trampoline emulation #1 */
18940+ unsigned short mov1, mov2, jmp1;
18941+ unsigned char jmp2;
18942+ unsigned int addr1;
18943+ unsigned long addr2;
18944+
18945+ err = get_user(mov1, (unsigned short __user *)regs->ip);
18946+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
18947+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
18948+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
18949+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
18950+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
18951+
18952+ if (err)
18953+ break;
18954+
18955+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
18956+ regs->r11 = addr1;
18957+ regs->r10 = addr2;
18958+ regs->ip = addr1;
18959+ return 2;
18960+ }
18961+ } while (0);
18962+
18963+ do { /* PaX: gcc trampoline emulation #2 */
18964+ unsigned short mov1, mov2, jmp1;
18965+ unsigned char jmp2;
18966+ unsigned long addr1, addr2;
18967+
18968+ err = get_user(mov1, (unsigned short __user *)regs->ip);
18969+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
18970+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
18971+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
18972+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
18973+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
18974+
18975+ if (err)
18976+ break;
18977+
18978+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
18979+ regs->r11 = addr1;
18980+ regs->r10 = addr2;
18981+ regs->ip = addr1;
18982+ return 2;
18983+ }
18984+ } while (0);
18985+
18986+ return 1; /* PaX in action */
18987+}
18988+#endif
18989+
18990+/*
18991+ * PaX: decide what to do with offenders (regs->ip = fault address)
18992+ *
18993+ * returns 1 when task should be killed
18994+ * 2 when gcc trampoline was detected
18995+ */
18996+static int pax_handle_fetch_fault(struct pt_regs *regs)
18997+{
18998+ if (v8086_mode(regs))
18999+ return 1;
19000+
19001+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
19002+ return 1;
19003+
19004+#ifdef CONFIG_X86_32
19005+ return pax_handle_fetch_fault_32(regs);
19006+#else
19007+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
19008+ return pax_handle_fetch_fault_32(regs);
19009+ else
19010+ return pax_handle_fetch_fault_64(regs);
19011+#endif
19012+}
19013+#endif
19014+
19015+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19016+void pax_report_insns(void *pc, void *sp)
19017+{
19018+ long i;
19019+
19020+ printk(KERN_ERR "PAX: bytes at PC: ");
19021+ for (i = 0; i < 20; i++) {
19022+ unsigned char c;
19023+ if (get_user(c, (__force unsigned char __user *)pc+i))
19024+ printk(KERN_CONT "?? ");
19025+ else
19026+ printk(KERN_CONT "%02x ", c);
19027+ }
19028+ printk("\n");
19029+
19030+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
19031+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
19032+ unsigned long c;
19033+ if (get_user(c, (__force unsigned long __user *)sp+i))
19034+#ifdef CONFIG_X86_32
19035+ printk(KERN_CONT "???????? ");
19036+#else
19037+ printk(KERN_CONT "???????????????? ");
19038+#endif
19039+ else
19040+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
19041+ }
19042+ printk("\n");
19043+}
19044+#endif
19045+
19046+/**
19047+ * probe_kernel_write(): safely attempt to write to a location
19048+ * @dst: address to write to
19049+ * @src: pointer to the data that shall be written
19050+ * @size: size of the data chunk
19051+ *
19052+ * Safely write to address @dst from the buffer at @src. If a kernel fault
19053+ * happens, handle that and return -EFAULT.
19054+ */
19055+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
19056+{
19057+ long ret;
19058+ mm_segment_t old_fs = get_fs();
19059+
19060+ set_fs(KERNEL_DS);
19061+ pagefault_disable();
19062+ pax_open_kernel();
19063+ ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
19064+ pax_close_kernel();
19065+ pagefault_enable();
19066+ set_fs(old_fs);
19067+
19068+ return ret ? -EFAULT : 0;
19069+}
19070diff -urNp linux-3.0.4/arch/x86/mm/gup.c linux-3.0.4/arch/x86/mm/gup.c
19071--- linux-3.0.4/arch/x86/mm/gup.c 2011-07-21 22:17:23.000000000 -0400
19072+++ linux-3.0.4/arch/x86/mm/gup.c 2011-08-23 21:47:55.000000000 -0400
19073@@ -263,7 +263,7 @@ int __get_user_pages_fast(unsigned long
19074 addr = start;
19075 len = (unsigned long) nr_pages << PAGE_SHIFT;
19076 end = start + len;
19077- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19078+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19079 (void __user *)start, len)))
19080 return 0;
19081
19082diff -urNp linux-3.0.4/arch/x86/mm/highmem_32.c linux-3.0.4/arch/x86/mm/highmem_32.c
19083--- linux-3.0.4/arch/x86/mm/highmem_32.c 2011-07-21 22:17:23.000000000 -0400
19084+++ linux-3.0.4/arch/x86/mm/highmem_32.c 2011-08-23 21:47:55.000000000 -0400
19085@@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page
19086 idx = type + KM_TYPE_NR*smp_processor_id();
19087 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19088 BUG_ON(!pte_none(*(kmap_pte-idx)));
19089+
19090+ pax_open_kernel();
19091 set_pte(kmap_pte-idx, mk_pte(page, prot));
19092+ pax_close_kernel();
19093
19094 return (void *)vaddr;
19095 }
19096diff -urNp linux-3.0.4/arch/x86/mm/hugetlbpage.c linux-3.0.4/arch/x86/mm/hugetlbpage.c
19097--- linux-3.0.4/arch/x86/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
19098+++ linux-3.0.4/arch/x86/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
19099@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe
19100 struct hstate *h = hstate_file(file);
19101 struct mm_struct *mm = current->mm;
19102 struct vm_area_struct *vma;
19103- unsigned long start_addr;
19104+ unsigned long start_addr, pax_task_size = TASK_SIZE;
19105+
19106+#ifdef CONFIG_PAX_SEGMEXEC
19107+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19108+ pax_task_size = SEGMEXEC_TASK_SIZE;
19109+#endif
19110+
19111+ pax_task_size -= PAGE_SIZE;
19112
19113 if (len > mm->cached_hole_size) {
19114- start_addr = mm->free_area_cache;
19115+ start_addr = mm->free_area_cache;
19116 } else {
19117- start_addr = TASK_UNMAPPED_BASE;
19118- mm->cached_hole_size = 0;
19119+ start_addr = mm->mmap_base;
19120+ mm->cached_hole_size = 0;
19121 }
19122
19123 full_search:
19124@@ -280,26 +287,27 @@ full_search:
19125
19126 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19127 /* At this point: (!vma || addr < vma->vm_end). */
19128- if (TASK_SIZE - len < addr) {
19129+ if (pax_task_size - len < addr) {
19130 /*
19131 * Start a new search - just in case we missed
19132 * some holes.
19133 */
19134- if (start_addr != TASK_UNMAPPED_BASE) {
19135- start_addr = TASK_UNMAPPED_BASE;
19136+ if (start_addr != mm->mmap_base) {
19137+ start_addr = mm->mmap_base;
19138 mm->cached_hole_size = 0;
19139 goto full_search;
19140 }
19141 return -ENOMEM;
19142 }
19143- if (!vma || addr + len <= vma->vm_start) {
19144- mm->free_area_cache = addr + len;
19145- return addr;
19146- }
19147+ if (check_heap_stack_gap(vma, addr, len))
19148+ break;
19149 if (addr + mm->cached_hole_size < vma->vm_start)
19150 mm->cached_hole_size = vma->vm_start - addr;
19151 addr = ALIGN(vma->vm_end, huge_page_size(h));
19152 }
19153+
19154+ mm->free_area_cache = addr + len;
19155+ return addr;
19156 }
19157
19158 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
19159@@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmappe
19160 {
19161 struct hstate *h = hstate_file(file);
19162 struct mm_struct *mm = current->mm;
19163- struct vm_area_struct *vma, *prev_vma;
19164- unsigned long base = mm->mmap_base, addr = addr0;
19165+ struct vm_area_struct *vma;
19166+ unsigned long base = mm->mmap_base, addr;
19167 unsigned long largest_hole = mm->cached_hole_size;
19168- int first_time = 1;
19169
19170 /* don't allow allocations above current base */
19171 if (mm->free_area_cache > base)
19172@@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmappe
19173 largest_hole = 0;
19174 mm->free_area_cache = base;
19175 }
19176-try_again:
19177+
19178 /* make sure it can fit in the remaining address space */
19179 if (mm->free_area_cache < len)
19180 goto fail;
19181
19182 /* either no address requested or can't fit in requested address hole */
19183- addr = (mm->free_area_cache - len) & huge_page_mask(h);
19184+ addr = (mm->free_area_cache - len);
19185 do {
19186+ addr &= huge_page_mask(h);
19187+ vma = find_vma(mm, addr);
19188 /*
19189 * Lookup failure means no vma is above this address,
19190 * i.e. return with success:
19191- */
19192- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
19193- return addr;
19194-
19195- /*
19196 * new region fits between prev_vma->vm_end and
19197 * vma->vm_start, use it:
19198 */
19199- if (addr + len <= vma->vm_start &&
19200- (!prev_vma || (addr >= prev_vma->vm_end))) {
19201+ if (check_heap_stack_gap(vma, addr, len)) {
19202 /* remember the address as a hint for next time */
19203- mm->cached_hole_size = largest_hole;
19204- return (mm->free_area_cache = addr);
19205- } else {
19206- /* pull free_area_cache down to the first hole */
19207- if (mm->free_area_cache == vma->vm_end) {
19208- mm->free_area_cache = vma->vm_start;
19209- mm->cached_hole_size = largest_hole;
19210- }
19211+ mm->cached_hole_size = largest_hole;
19212+ return (mm->free_area_cache = addr);
19213+ }
19214+ /* pull free_area_cache down to the first hole */
19215+ if (mm->free_area_cache == vma->vm_end) {
19216+ mm->free_area_cache = vma->vm_start;
19217+ mm->cached_hole_size = largest_hole;
19218 }
19219
19220 /* remember the largest hole we saw so far */
19221 if (addr + largest_hole < vma->vm_start)
19222- largest_hole = vma->vm_start - addr;
19223+ largest_hole = vma->vm_start - addr;
19224
19225 /* try just below the current vma->vm_start */
19226- addr = (vma->vm_start - len) & huge_page_mask(h);
19227- } while (len <= vma->vm_start);
19228+ addr = skip_heap_stack_gap(vma, len);
19229+ } while (!IS_ERR_VALUE(addr));
19230
19231 fail:
19232 /*
19233- * if hint left us with no space for the requested
19234- * mapping then try again:
19235- */
19236- if (first_time) {
19237- mm->free_area_cache = base;
19238- largest_hole = 0;
19239- first_time = 0;
19240- goto try_again;
19241- }
19242- /*
19243 * A failed mmap() very likely causes application failure,
19244 * so fall back to the bottom-up function here. This scenario
19245 * can happen with large stack limits and large mmap()
19246 * allocations.
19247 */
19248- mm->free_area_cache = TASK_UNMAPPED_BASE;
19249+
19250+#ifdef CONFIG_PAX_SEGMEXEC
19251+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19252+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19253+ else
19254+#endif
19255+
19256+ mm->mmap_base = TASK_UNMAPPED_BASE;
19257+
19258+#ifdef CONFIG_PAX_RANDMMAP
19259+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19260+ mm->mmap_base += mm->delta_mmap;
19261+#endif
19262+
19263+ mm->free_area_cache = mm->mmap_base;
19264 mm->cached_hole_size = ~0UL;
19265 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
19266 len, pgoff, flags);
19267@@ -386,6 +392,7 @@ fail:
19268 /*
19269 * Restore the topdown base:
19270 */
19271+ mm->mmap_base = base;
19272 mm->free_area_cache = base;
19273 mm->cached_hole_size = ~0UL;
19274
19275@@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *f
19276 struct hstate *h = hstate_file(file);
19277 struct mm_struct *mm = current->mm;
19278 struct vm_area_struct *vma;
19279+ unsigned long pax_task_size = TASK_SIZE;
19280
19281 if (len & ~huge_page_mask(h))
19282 return -EINVAL;
19283- if (len > TASK_SIZE)
19284+
19285+#ifdef CONFIG_PAX_SEGMEXEC
19286+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19287+ pax_task_size = SEGMEXEC_TASK_SIZE;
19288+#endif
19289+
19290+ pax_task_size -= PAGE_SIZE;
19291+
19292+ if (len > pax_task_size)
19293 return -ENOMEM;
19294
19295 if (flags & MAP_FIXED) {
19296@@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *f
19297 if (addr) {
19298 addr = ALIGN(addr, huge_page_size(h));
19299 vma = find_vma(mm, addr);
19300- if (TASK_SIZE - len >= addr &&
19301- (!vma || addr + len <= vma->vm_start))
19302+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
19303 return addr;
19304 }
19305 if (mm->get_unmapped_area == arch_get_unmapped_area)
19306diff -urNp linux-3.0.4/arch/x86/mm/init_32.c linux-3.0.4/arch/x86/mm/init_32.c
19307--- linux-3.0.4/arch/x86/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
19308+++ linux-3.0.4/arch/x86/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
19309@@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
19310 }
19311
19312 /*
19313- * Creates a middle page table and puts a pointer to it in the
19314- * given global directory entry. This only returns the gd entry
19315- * in non-PAE compilation mode, since the middle layer is folded.
19316- */
19317-static pmd_t * __init one_md_table_init(pgd_t *pgd)
19318-{
19319- pud_t *pud;
19320- pmd_t *pmd_table;
19321-
19322-#ifdef CONFIG_X86_PAE
19323- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
19324- if (after_bootmem)
19325- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
19326- else
19327- pmd_table = (pmd_t *)alloc_low_page();
19328- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
19329- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
19330- pud = pud_offset(pgd, 0);
19331- BUG_ON(pmd_table != pmd_offset(pud, 0));
19332-
19333- return pmd_table;
19334- }
19335-#endif
19336- pud = pud_offset(pgd, 0);
19337- pmd_table = pmd_offset(pud, 0);
19338-
19339- return pmd_table;
19340-}
19341-
19342-/*
19343 * Create a page table and place a pointer to it in a middle page
19344 * directory entry:
19345 */
19346@@ -123,13 +93,28 @@ static pte_t * __init one_page_table_ini
19347 page_table = (pte_t *)alloc_low_page();
19348
19349 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
19350+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19351+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
19352+#else
19353 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
19354+#endif
19355 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
19356 }
19357
19358 return pte_offset_kernel(pmd, 0);
19359 }
19360
19361+static pmd_t * __init one_md_table_init(pgd_t *pgd)
19362+{
19363+ pud_t *pud;
19364+ pmd_t *pmd_table;
19365+
19366+ pud = pud_offset(pgd, 0);
19367+ pmd_table = pmd_offset(pud, 0);
19368+
19369+ return pmd_table;
19370+}
19371+
19372 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
19373 {
19374 int pgd_idx = pgd_index(vaddr);
19375@@ -203,6 +188,7 @@ page_table_range_init(unsigned long star
19376 int pgd_idx, pmd_idx;
19377 unsigned long vaddr;
19378 pgd_t *pgd;
19379+ pud_t *pud;
19380 pmd_t *pmd;
19381 pte_t *pte = NULL;
19382
19383@@ -212,8 +198,13 @@ page_table_range_init(unsigned long star
19384 pgd = pgd_base + pgd_idx;
19385
19386 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
19387- pmd = one_md_table_init(pgd);
19388- pmd = pmd + pmd_index(vaddr);
19389+ pud = pud_offset(pgd, vaddr);
19390+ pmd = pmd_offset(pud, vaddr);
19391+
19392+#ifdef CONFIG_X86_PAE
19393+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19394+#endif
19395+
19396 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
19397 pmd++, pmd_idx++) {
19398 pte = page_table_kmap_check(one_page_table_init(pmd),
19399@@ -225,11 +216,20 @@ page_table_range_init(unsigned long star
19400 }
19401 }
19402
19403-static inline int is_kernel_text(unsigned long addr)
19404+static inline int is_kernel_text(unsigned long start, unsigned long end)
19405 {
19406- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
19407- return 1;
19408- return 0;
19409+ if ((start > ktla_ktva((unsigned long)_etext) ||
19410+ end <= ktla_ktva((unsigned long)_stext)) &&
19411+ (start > ktla_ktva((unsigned long)_einittext) ||
19412+ end <= ktla_ktva((unsigned long)_sinittext)) &&
19413+
19414+#ifdef CONFIG_ACPI_SLEEP
19415+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
19416+#endif
19417+
19418+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
19419+ return 0;
19420+ return 1;
19421 }
19422
19423 /*
19424@@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned lo
19425 unsigned long last_map_addr = end;
19426 unsigned long start_pfn, end_pfn;
19427 pgd_t *pgd_base = swapper_pg_dir;
19428- int pgd_idx, pmd_idx, pte_ofs;
19429+ unsigned int pgd_idx, pmd_idx, pte_ofs;
19430 unsigned long pfn;
19431 pgd_t *pgd;
19432+ pud_t *pud;
19433 pmd_t *pmd;
19434 pte_t *pte;
19435 unsigned pages_2m, pages_4k;
19436@@ -281,8 +282,13 @@ repeat:
19437 pfn = start_pfn;
19438 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19439 pgd = pgd_base + pgd_idx;
19440- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
19441- pmd = one_md_table_init(pgd);
19442+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
19443+ pud = pud_offset(pgd, 0);
19444+ pmd = pmd_offset(pud, 0);
19445+
19446+#ifdef CONFIG_X86_PAE
19447+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19448+#endif
19449
19450 if (pfn >= end_pfn)
19451 continue;
19452@@ -294,14 +300,13 @@ repeat:
19453 #endif
19454 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
19455 pmd++, pmd_idx++) {
19456- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
19457+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
19458
19459 /*
19460 * Map with big pages if possible, otherwise
19461 * create normal page tables:
19462 */
19463 if (use_pse) {
19464- unsigned int addr2;
19465 pgprot_t prot = PAGE_KERNEL_LARGE;
19466 /*
19467 * first pass will use the same initial
19468@@ -311,11 +316,7 @@ repeat:
19469 __pgprot(PTE_IDENT_ATTR |
19470 _PAGE_PSE);
19471
19472- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
19473- PAGE_OFFSET + PAGE_SIZE-1;
19474-
19475- if (is_kernel_text(addr) ||
19476- is_kernel_text(addr2))
19477+ if (is_kernel_text(address, address + PMD_SIZE))
19478 prot = PAGE_KERNEL_LARGE_EXEC;
19479
19480 pages_2m++;
19481@@ -332,7 +333,7 @@ repeat:
19482 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19483 pte += pte_ofs;
19484 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
19485- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
19486+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
19487 pgprot_t prot = PAGE_KERNEL;
19488 /*
19489 * first pass will use the same initial
19490@@ -340,7 +341,7 @@ repeat:
19491 */
19492 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
19493
19494- if (is_kernel_text(addr))
19495+ if (is_kernel_text(address, address + PAGE_SIZE))
19496 prot = PAGE_KERNEL_EXEC;
19497
19498 pages_4k++;
19499@@ -472,7 +473,7 @@ void __init native_pagetable_setup_start
19500
19501 pud = pud_offset(pgd, va);
19502 pmd = pmd_offset(pud, va);
19503- if (!pmd_present(*pmd))
19504+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
19505 break;
19506
19507 pte = pte_offset_kernel(pmd, va);
19508@@ -524,12 +525,10 @@ void __init early_ioremap_page_table_ran
19509
19510 static void __init pagetable_init(void)
19511 {
19512- pgd_t *pgd_base = swapper_pg_dir;
19513-
19514- permanent_kmaps_init(pgd_base);
19515+ permanent_kmaps_init(swapper_pg_dir);
19516 }
19517
19518-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19519+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19520 EXPORT_SYMBOL_GPL(__supported_pte_mask);
19521
19522 /* user-defined highmem size */
19523@@ -757,6 +756,12 @@ void __init mem_init(void)
19524
19525 pci_iommu_alloc();
19526
19527+#ifdef CONFIG_PAX_PER_CPU_PGD
19528+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
19529+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19530+ KERNEL_PGD_PTRS);
19531+#endif
19532+
19533 #ifdef CONFIG_FLATMEM
19534 BUG_ON(!mem_map);
19535 #endif
19536@@ -774,7 +779,7 @@ void __init mem_init(void)
19537 set_highmem_pages_init();
19538
19539 codesize = (unsigned long) &_etext - (unsigned long) &_text;
19540- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
19541+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
19542 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
19543
19544 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
19545@@ -815,10 +820,10 @@ void __init mem_init(void)
19546 ((unsigned long)&__init_end -
19547 (unsigned long)&__init_begin) >> 10,
19548
19549- (unsigned long)&_etext, (unsigned long)&_edata,
19550- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
19551+ (unsigned long)&_sdata, (unsigned long)&_edata,
19552+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
19553
19554- (unsigned long)&_text, (unsigned long)&_etext,
19555+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
19556 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
19557
19558 /*
19559@@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
19560 if (!kernel_set_to_readonly)
19561 return;
19562
19563+ start = ktla_ktva(start);
19564 pr_debug("Set kernel text: %lx - %lx for read write\n",
19565 start, start+size);
19566
19567@@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
19568 if (!kernel_set_to_readonly)
19569 return;
19570
19571+ start = ktla_ktva(start);
19572 pr_debug("Set kernel text: %lx - %lx for read only\n",
19573 start, start+size);
19574
19575@@ -938,6 +945,7 @@ void mark_rodata_ro(void)
19576 unsigned long start = PFN_ALIGN(_text);
19577 unsigned long size = PFN_ALIGN(_etext) - start;
19578
19579+ start = ktla_ktva(start);
19580 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
19581 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
19582 size >> 10);
19583diff -urNp linux-3.0.4/arch/x86/mm/init_64.c linux-3.0.4/arch/x86/mm/init_64.c
19584--- linux-3.0.4/arch/x86/mm/init_64.c 2011-07-21 22:17:23.000000000 -0400
19585+++ linux-3.0.4/arch/x86/mm/init_64.c 2011-08-23 21:47:55.000000000 -0400
19586@@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpa
19587 * around without checking the pgd every time.
19588 */
19589
19590-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
19591+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
19592 EXPORT_SYMBOL_GPL(__supported_pte_mask);
19593
19594 int force_personality32;
19595@@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long star
19596
19597 for (address = start; address <= end; address += PGDIR_SIZE) {
19598 const pgd_t *pgd_ref = pgd_offset_k(address);
19599+
19600+#ifdef CONFIG_PAX_PER_CPU_PGD
19601+ unsigned long cpu;
19602+#else
19603 struct page *page;
19604+#endif
19605
19606 if (pgd_none(*pgd_ref))
19607 continue;
19608
19609 spin_lock(&pgd_lock);
19610+
19611+#ifdef CONFIG_PAX_PER_CPU_PGD
19612+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19613+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
19614+#else
19615 list_for_each_entry(page, &pgd_list, lru) {
19616 pgd_t *pgd;
19617 spinlock_t *pgt_lock;
19618@@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long star
19619 /* the pgt_lock only for Xen */
19620 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
19621 spin_lock(pgt_lock);
19622+#endif
19623
19624 if (pgd_none(*pgd))
19625 set_pgd(pgd, *pgd_ref);
19626@@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long star
19627 BUG_ON(pgd_page_vaddr(*pgd)
19628 != pgd_page_vaddr(*pgd_ref));
19629
19630+#ifndef CONFIG_PAX_PER_CPU_PGD
19631 spin_unlock(pgt_lock);
19632+#endif
19633+
19634 }
19635 spin_unlock(&pgd_lock);
19636 }
19637@@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
19638 pmd = fill_pmd(pud, vaddr);
19639 pte = fill_pte(pmd, vaddr);
19640
19641+ pax_open_kernel();
19642 set_pte(pte, new_pte);
19643+ pax_close_kernel();
19644
19645 /*
19646 * It's enough to flush this one mapping.
19647@@ -262,14 +278,12 @@ static void __init __init_extra_mapping(
19648 pgd = pgd_offset_k((unsigned long)__va(phys));
19649 if (pgd_none(*pgd)) {
19650 pud = (pud_t *) spp_getpage();
19651- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
19652- _PAGE_USER));
19653+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
19654 }
19655 pud = pud_offset(pgd, (unsigned long)__va(phys));
19656 if (pud_none(*pud)) {
19657 pmd = (pmd_t *) spp_getpage();
19658- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
19659- _PAGE_USER));
19660+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
19661 }
19662 pmd = pmd_offset(pud, phys);
19663 BUG_ON(!pmd_none(*pmd));
19664@@ -693,6 +707,12 @@ void __init mem_init(void)
19665
19666 pci_iommu_alloc();
19667
19668+#ifdef CONFIG_PAX_PER_CPU_PGD
19669+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
19670+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19671+ KERNEL_PGD_PTRS);
19672+#endif
19673+
19674 /* clear_bss() already clear the empty_zero_page */
19675
19676 reservedpages = 0;
19677@@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
19678 static struct vm_area_struct gate_vma = {
19679 .vm_start = VSYSCALL_START,
19680 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
19681- .vm_page_prot = PAGE_READONLY_EXEC,
19682- .vm_flags = VM_READ | VM_EXEC
19683+ .vm_page_prot = PAGE_READONLY,
19684+ .vm_flags = VM_READ
19685 };
19686
19687 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
19688@@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long add
19689
19690 const char *arch_vma_name(struct vm_area_struct *vma)
19691 {
19692- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
19693+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
19694 return "[vdso]";
19695 if (vma == &gate_vma)
19696 return "[vsyscall]";
19697diff -urNp linux-3.0.4/arch/x86/mm/init.c linux-3.0.4/arch/x86/mm/init.c
19698--- linux-3.0.4/arch/x86/mm/init.c 2011-07-21 22:17:23.000000000 -0400
19699+++ linux-3.0.4/arch/x86/mm/init.c 2011-08-23 21:48:14.000000000 -0400
19700@@ -31,7 +31,7 @@ int direct_gbpages
19701 static void __init find_early_table_space(unsigned long end, int use_pse,
19702 int use_gbpages)
19703 {
19704- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
19705+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
19706 phys_addr_t base;
19707
19708 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
19709@@ -313,12 +313,34 @@ unsigned long __init_refok init_memory_m
19710 */
19711 int devmem_is_allowed(unsigned long pagenr)
19712 {
19713- if (pagenr <= 256)
19714+#ifdef CONFIG_GRKERNSEC_KMEM
19715+ /* allow BDA */
19716+ if (!pagenr)
19717+ return 1;
19718+ /* allow EBDA */
19719+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
19720+ return 1;
19721+#else
19722+ if (!pagenr)
19723+ return 1;
19724+#ifdef CONFIG_VM86
19725+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
19726+ return 1;
19727+#endif
19728+#endif
19729+
19730+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
19731 return 1;
19732+#ifdef CONFIG_GRKERNSEC_KMEM
19733+ /* throw out everything else below 1MB */
19734+ if (pagenr <= 256)
19735+ return 0;
19736+#endif
19737 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
19738 return 0;
19739 if (!page_is_ram(pagenr))
19740 return 1;
19741+
19742 return 0;
19743 }
19744
19745@@ -373,6 +395,86 @@ void free_init_pages(char *what, unsigne
19746
19747 void free_initmem(void)
19748 {
19749+
19750+#ifdef CONFIG_PAX_KERNEXEC
19751+#ifdef CONFIG_X86_32
19752+ /* PaX: limit KERNEL_CS to actual size */
19753+ unsigned long addr, limit;
19754+ struct desc_struct d;
19755+ int cpu;
19756+
19757+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
19758+ limit = (limit - 1UL) >> PAGE_SHIFT;
19759+
19760+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
19761+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
19762+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
19763+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
19764+ }
19765+
19766+ /* PaX: make KERNEL_CS read-only */
19767+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
19768+ if (!paravirt_enabled())
19769+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
19770+/*
19771+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
19772+ pgd = pgd_offset_k(addr);
19773+ pud = pud_offset(pgd, addr);
19774+ pmd = pmd_offset(pud, addr);
19775+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19776+ }
19777+*/
19778+#ifdef CONFIG_X86_PAE
19779+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
19780+/*
19781+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
19782+ pgd = pgd_offset_k(addr);
19783+ pud = pud_offset(pgd, addr);
19784+ pmd = pmd_offset(pud, addr);
19785+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
19786+ }
19787+*/
19788+#endif
19789+
19790+#ifdef CONFIG_MODULES
19791+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
19792+#endif
19793+
19794+#else
19795+ pgd_t *pgd;
19796+ pud_t *pud;
19797+ pmd_t *pmd;
19798+ unsigned long addr, end;
19799+
19800+ /* PaX: make kernel code/rodata read-only, rest non-executable */
19801+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
19802+ pgd = pgd_offset_k(addr);
19803+ pud = pud_offset(pgd, addr);
19804+ pmd = pmd_offset(pud, addr);
19805+ if (!pmd_present(*pmd))
19806+ continue;
19807+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
19808+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19809+ else
19810+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
19811+ }
19812+
19813+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
19814+ end = addr + KERNEL_IMAGE_SIZE;
19815+ for (; addr < end; addr += PMD_SIZE) {
19816+ pgd = pgd_offset_k(addr);
19817+ pud = pud_offset(pgd, addr);
19818+ pmd = pmd_offset(pud, addr);
19819+ if (!pmd_present(*pmd))
19820+ continue;
19821+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
19822+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19823+ }
19824+#endif
19825+
19826+ flush_tlb_all();
19827+#endif
19828+
19829 free_init_pages("unused kernel memory",
19830 (unsigned long)(&__init_begin),
19831 (unsigned long)(&__init_end));
19832diff -urNp linux-3.0.4/arch/x86/mm/iomap_32.c linux-3.0.4/arch/x86/mm/iomap_32.c
19833--- linux-3.0.4/arch/x86/mm/iomap_32.c 2011-07-21 22:17:23.000000000 -0400
19834+++ linux-3.0.4/arch/x86/mm/iomap_32.c 2011-08-23 21:47:55.000000000 -0400
19835@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
19836 type = kmap_atomic_idx_push();
19837 idx = type + KM_TYPE_NR * smp_processor_id();
19838 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19839+
19840+ pax_open_kernel();
19841 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
19842+ pax_close_kernel();
19843+
19844 arch_flush_lazy_mmu_mode();
19845
19846 return (void *)vaddr;
19847diff -urNp linux-3.0.4/arch/x86/mm/ioremap.c linux-3.0.4/arch/x86/mm/ioremap.c
19848--- linux-3.0.4/arch/x86/mm/ioremap.c 2011-07-21 22:17:23.000000000 -0400
19849+++ linux-3.0.4/arch/x86/mm/ioremap.c 2011-08-23 21:47:55.000000000 -0400
19850@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(re
19851 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
19852 int is_ram = page_is_ram(pfn);
19853
19854- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
19855+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
19856 return NULL;
19857 WARN_ON_ONCE(is_ram);
19858 }
19859@@ -344,7 +344,7 @@ static int __init early_ioremap_debug_se
19860 early_param("early_ioremap_debug", early_ioremap_debug_setup);
19861
19862 static __initdata int after_paging_init;
19863-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
19864+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
19865
19866 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
19867 {
19868@@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
19869 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
19870
19871 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
19872- memset(bm_pte, 0, sizeof(bm_pte));
19873- pmd_populate_kernel(&init_mm, pmd, bm_pte);
19874+ pmd_populate_user(&init_mm, pmd, bm_pte);
19875
19876 /*
19877 * The boot-ioremap range spans multiple pmds, for which
19878diff -urNp linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c
19879--- linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c 2011-07-21 22:17:23.000000000 -0400
19880+++ linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c 2011-08-23 21:47:55.000000000 -0400
19881@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
19882 * memory (e.g. tracked pages)? For now, we need this to avoid
19883 * invoking kmemcheck for PnP BIOS calls.
19884 */
19885- if (regs->flags & X86_VM_MASK)
19886+ if (v8086_mode(regs))
19887 return false;
19888- if (regs->cs != __KERNEL_CS)
19889+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
19890 return false;
19891
19892 pte = kmemcheck_pte_lookup(address);
19893diff -urNp linux-3.0.4/arch/x86/mm/mmap.c linux-3.0.4/arch/x86/mm/mmap.c
19894--- linux-3.0.4/arch/x86/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
19895+++ linux-3.0.4/arch/x86/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
19896@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
19897 * Leave an at least ~128 MB hole with possible stack randomization.
19898 */
19899 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
19900-#define MAX_GAP (TASK_SIZE/6*5)
19901+#define MAX_GAP (pax_task_size/6*5)
19902
19903 /*
19904 * True on X86_32 or when emulating IA32 on X86_64
19905@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
19906 return rnd << PAGE_SHIFT;
19907 }
19908
19909-static unsigned long mmap_base(void)
19910+static unsigned long mmap_base(struct mm_struct *mm)
19911 {
19912 unsigned long gap = rlimit(RLIMIT_STACK);
19913+ unsigned long pax_task_size = TASK_SIZE;
19914+
19915+#ifdef CONFIG_PAX_SEGMEXEC
19916+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19917+ pax_task_size = SEGMEXEC_TASK_SIZE;
19918+#endif
19919
19920 if (gap < MIN_GAP)
19921 gap = MIN_GAP;
19922 else if (gap > MAX_GAP)
19923 gap = MAX_GAP;
19924
19925- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
19926+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
19927 }
19928
19929 /*
19930 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
19931 * does, but not when emulating X86_32
19932 */
19933-static unsigned long mmap_legacy_base(void)
19934+static unsigned long mmap_legacy_base(struct mm_struct *mm)
19935 {
19936- if (mmap_is_ia32())
19937+ if (mmap_is_ia32()) {
19938+
19939+#ifdef CONFIG_PAX_SEGMEXEC
19940+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19941+ return SEGMEXEC_TASK_UNMAPPED_BASE;
19942+ else
19943+#endif
19944+
19945 return TASK_UNMAPPED_BASE;
19946- else
19947+ } else
19948 return TASK_UNMAPPED_BASE + mmap_rnd();
19949 }
19950
19951@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
19952 void arch_pick_mmap_layout(struct mm_struct *mm)
19953 {
19954 if (mmap_is_legacy()) {
19955- mm->mmap_base = mmap_legacy_base();
19956+ mm->mmap_base = mmap_legacy_base(mm);
19957+
19958+#ifdef CONFIG_PAX_RANDMMAP
19959+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19960+ mm->mmap_base += mm->delta_mmap;
19961+#endif
19962+
19963 mm->get_unmapped_area = arch_get_unmapped_area;
19964 mm->unmap_area = arch_unmap_area;
19965 } else {
19966- mm->mmap_base = mmap_base();
19967+ mm->mmap_base = mmap_base(mm);
19968+
19969+#ifdef CONFIG_PAX_RANDMMAP
19970+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19971+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
19972+#endif
19973+
19974 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
19975 mm->unmap_area = arch_unmap_area_topdown;
19976 }
19977diff -urNp linux-3.0.4/arch/x86/mm/mmio-mod.c linux-3.0.4/arch/x86/mm/mmio-mod.c
19978--- linux-3.0.4/arch/x86/mm/mmio-mod.c 2011-07-21 22:17:23.000000000 -0400
19979+++ linux-3.0.4/arch/x86/mm/mmio-mod.c 2011-08-23 21:47:55.000000000 -0400
19980@@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, s
19981 break;
19982 default:
19983 {
19984- unsigned char *ip = (unsigned char *)instptr;
19985+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
19986 my_trace->opcode = MMIO_UNKNOWN_OP;
19987 my_trace->width = 0;
19988 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
19989@@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p,
19990 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
19991 void __iomem *addr)
19992 {
19993- static atomic_t next_id;
19994+ static atomic_unchecked_t next_id;
19995 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
19996 /* These are page-unaligned. */
19997 struct mmiotrace_map map = {
19998@@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_
19999 .private = trace
20000 },
20001 .phys = offset,
20002- .id = atomic_inc_return(&next_id)
20003+ .id = atomic_inc_return_unchecked(&next_id)
20004 };
20005 map.map_id = trace->id;
20006
20007diff -urNp linux-3.0.4/arch/x86/mm/pageattr.c linux-3.0.4/arch/x86/mm/pageattr.c
20008--- linux-3.0.4/arch/x86/mm/pageattr.c 2011-07-21 22:17:23.000000000 -0400
20009+++ linux-3.0.4/arch/x86/mm/pageattr.c 2011-08-23 21:47:55.000000000 -0400
20010@@ -261,7 +261,7 @@ static inline pgprot_t static_protection
20011 */
20012 #ifdef CONFIG_PCI_BIOS
20013 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
20014- pgprot_val(forbidden) |= _PAGE_NX;
20015+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20016 #endif
20017
20018 /*
20019@@ -269,9 +269,10 @@ static inline pgprot_t static_protection
20020 * Does not cover __inittext since that is gone later on. On
20021 * 64bit we do not enforce !NX on the low mapping
20022 */
20023- if (within(address, (unsigned long)_text, (unsigned long)_etext))
20024- pgprot_val(forbidden) |= _PAGE_NX;
20025+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
20026+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20027
20028+#ifdef CONFIG_DEBUG_RODATA
20029 /*
20030 * The .rodata section needs to be read-only. Using the pfn
20031 * catches all aliases.
20032@@ -279,6 +280,7 @@ static inline pgprot_t static_protection
20033 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
20034 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
20035 pgprot_val(forbidden) |= _PAGE_RW;
20036+#endif
20037
20038 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
20039 /*
20040@@ -317,6 +319,13 @@ static inline pgprot_t static_protection
20041 }
20042 #endif
20043
20044+#ifdef CONFIG_PAX_KERNEXEC
20045+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
20046+ pgprot_val(forbidden) |= _PAGE_RW;
20047+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20048+ }
20049+#endif
20050+
20051 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
20052
20053 return prot;
20054@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
20055 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
20056 {
20057 /* change init_mm */
20058+ pax_open_kernel();
20059 set_pte_atomic(kpte, pte);
20060+
20061 #ifdef CONFIG_X86_32
20062 if (!SHARED_KERNEL_PMD) {
20063+
20064+#ifdef CONFIG_PAX_PER_CPU_PGD
20065+ unsigned long cpu;
20066+#else
20067 struct page *page;
20068+#endif
20069
20070+#ifdef CONFIG_PAX_PER_CPU_PGD
20071+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20072+ pgd_t *pgd = get_cpu_pgd(cpu);
20073+#else
20074 list_for_each_entry(page, &pgd_list, lru) {
20075- pgd_t *pgd;
20076+ pgd_t *pgd = (pgd_t *)page_address(page);
20077+#endif
20078+
20079 pud_t *pud;
20080 pmd_t *pmd;
20081
20082- pgd = (pgd_t *)page_address(page) + pgd_index(address);
20083+ pgd += pgd_index(address);
20084 pud = pud_offset(pgd, address);
20085 pmd = pmd_offset(pud, address);
20086 set_pte_atomic((pte_t *)pmd, pte);
20087 }
20088 }
20089 #endif
20090+ pax_close_kernel();
20091 }
20092
20093 static int
20094diff -urNp linux-3.0.4/arch/x86/mm/pageattr-test.c linux-3.0.4/arch/x86/mm/pageattr-test.c
20095--- linux-3.0.4/arch/x86/mm/pageattr-test.c 2011-07-21 22:17:23.000000000 -0400
20096+++ linux-3.0.4/arch/x86/mm/pageattr-test.c 2011-08-23 21:47:55.000000000 -0400
20097@@ -36,7 +36,7 @@ enum {
20098
20099 static int pte_testbit(pte_t pte)
20100 {
20101- return pte_flags(pte) & _PAGE_UNUSED1;
20102+ return pte_flags(pte) & _PAGE_CPA_TEST;
20103 }
20104
20105 struct split_state {
20106diff -urNp linux-3.0.4/arch/x86/mm/pat.c linux-3.0.4/arch/x86/mm/pat.c
20107--- linux-3.0.4/arch/x86/mm/pat.c 2011-07-21 22:17:23.000000000 -0400
20108+++ linux-3.0.4/arch/x86/mm/pat.c 2011-08-23 21:47:55.000000000 -0400
20109@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
20110
20111 if (!entry) {
20112 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
20113- current->comm, current->pid, start, end);
20114+ current->comm, task_pid_nr(current), start, end);
20115 return -EINVAL;
20116 }
20117
20118@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
20119 while (cursor < to) {
20120 if (!devmem_is_allowed(pfn)) {
20121 printk(KERN_INFO
20122- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
20123- current->comm, from, to);
20124+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
20125+ current->comm, from, to, cursor);
20126 return 0;
20127 }
20128 cursor += PAGE_SIZE;
20129@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
20130 printk(KERN_INFO
20131 "%s:%d ioremap_change_attr failed %s "
20132 "for %Lx-%Lx\n",
20133- current->comm, current->pid,
20134+ current->comm, task_pid_nr(current),
20135 cattr_name(flags),
20136 base, (unsigned long long)(base + size));
20137 return -EINVAL;
20138@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr,
20139 if (want_flags != flags) {
20140 printk(KERN_WARNING
20141 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
20142- current->comm, current->pid,
20143+ current->comm, task_pid_nr(current),
20144 cattr_name(want_flags),
20145 (unsigned long long)paddr,
20146 (unsigned long long)(paddr + size),
20147@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr,
20148 free_memtype(paddr, paddr + size);
20149 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
20150 " for %Lx-%Lx, got %s\n",
20151- current->comm, current->pid,
20152+ current->comm, task_pid_nr(current),
20153 cattr_name(want_flags),
20154 (unsigned long long)paddr,
20155 (unsigned long long)(paddr + size),
20156diff -urNp linux-3.0.4/arch/x86/mm/pf_in.c linux-3.0.4/arch/x86/mm/pf_in.c
20157--- linux-3.0.4/arch/x86/mm/pf_in.c 2011-07-21 22:17:23.000000000 -0400
20158+++ linux-3.0.4/arch/x86/mm/pf_in.c 2011-08-23 21:47:55.000000000 -0400
20159@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
20160 int i;
20161 enum reason_type rv = OTHERS;
20162
20163- p = (unsigned char *)ins_addr;
20164+ p = (unsigned char *)ktla_ktva(ins_addr);
20165 p += skip_prefix(p, &prf);
20166 p += get_opcode(p, &opcode);
20167
20168@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
20169 struct prefix_bits prf;
20170 int i;
20171
20172- p = (unsigned char *)ins_addr;
20173+ p = (unsigned char *)ktla_ktva(ins_addr);
20174 p += skip_prefix(p, &prf);
20175 p += get_opcode(p, &opcode);
20176
20177@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
20178 struct prefix_bits prf;
20179 int i;
20180
20181- p = (unsigned char *)ins_addr;
20182+ p = (unsigned char *)ktla_ktva(ins_addr);
20183 p += skip_prefix(p, &prf);
20184 p += get_opcode(p, &opcode);
20185
20186@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned l
20187 struct prefix_bits prf;
20188 int i;
20189
20190- p = (unsigned char *)ins_addr;
20191+ p = (unsigned char *)ktla_ktva(ins_addr);
20192 p += skip_prefix(p, &prf);
20193 p += get_opcode(p, &opcode);
20194 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
20195@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned l
20196 struct prefix_bits prf;
20197 int i;
20198
20199- p = (unsigned char *)ins_addr;
20200+ p = (unsigned char *)ktla_ktva(ins_addr);
20201 p += skip_prefix(p, &prf);
20202 p += get_opcode(p, &opcode);
20203 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
20204diff -urNp linux-3.0.4/arch/x86/mm/pgtable_32.c linux-3.0.4/arch/x86/mm/pgtable_32.c
20205--- linux-3.0.4/arch/x86/mm/pgtable_32.c 2011-07-21 22:17:23.000000000 -0400
20206+++ linux-3.0.4/arch/x86/mm/pgtable_32.c 2011-08-23 21:47:55.000000000 -0400
20207@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr,
20208 return;
20209 }
20210 pte = pte_offset_kernel(pmd, vaddr);
20211+
20212+ pax_open_kernel();
20213 if (pte_val(pteval))
20214 set_pte_at(&init_mm, vaddr, pte, pteval);
20215 else
20216 pte_clear(&init_mm, vaddr, pte);
20217+ pax_close_kernel();
20218
20219 /*
20220 * It's enough to flush this one mapping.
20221diff -urNp linux-3.0.4/arch/x86/mm/pgtable.c linux-3.0.4/arch/x86/mm/pgtable.c
20222--- linux-3.0.4/arch/x86/mm/pgtable.c 2011-07-21 22:17:23.000000000 -0400
20223+++ linux-3.0.4/arch/x86/mm/pgtable.c 2011-08-23 21:47:55.000000000 -0400
20224@@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *p
20225 list_del(&page->lru);
20226 }
20227
20228-#define UNSHARED_PTRS_PER_PGD \
20229- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20230+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20231+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
20232
20233+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20234+{
20235+ while (count--)
20236+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
20237+}
20238+#endif
20239+
20240+#ifdef CONFIG_PAX_PER_CPU_PGD
20241+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20242+{
20243+ while (count--)
20244+
20245+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20246+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
20247+#else
20248+ *dst++ = *src++;
20249+#endif
20250
20251+}
20252+#endif
20253+
20254+#ifdef CONFIG_X86_64
20255+#define pxd_t pud_t
20256+#define pyd_t pgd_t
20257+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
20258+#define pxd_free(mm, pud) pud_free((mm), (pud))
20259+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
20260+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
20261+#define PYD_SIZE PGDIR_SIZE
20262+#else
20263+#define pxd_t pmd_t
20264+#define pyd_t pud_t
20265+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
20266+#define pxd_free(mm, pud) pmd_free((mm), (pud))
20267+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
20268+#define pyd_offset(mm ,address) pud_offset((mm), (address))
20269+#define PYD_SIZE PUD_SIZE
20270+#endif
20271+
20272+#ifdef CONFIG_PAX_PER_CPU_PGD
20273+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
20274+static inline void pgd_dtor(pgd_t *pgd) {}
20275+#else
20276 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
20277 {
20278 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
20279@@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
20280 pgd_list_del(pgd);
20281 spin_unlock(&pgd_lock);
20282 }
20283+#endif
20284
20285 /*
20286 * List of all pgd's needed for non-PAE so it can invalidate entries
20287@@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
20288 * -- wli
20289 */
20290
20291-#ifdef CONFIG_X86_PAE
20292+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
20293 /*
20294 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
20295 * updating the top-level pagetable entries to guarantee the
20296@@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
20297 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
20298 * and initialize the kernel pmds here.
20299 */
20300-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
20301+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20302
20303 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
20304 {
20305@@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm,
20306 */
20307 flush_tlb_mm(mm);
20308 }
20309+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
20310+#define PREALLOCATED_PXDS USER_PGD_PTRS
20311 #else /* !CONFIG_X86_PAE */
20312
20313 /* No need to prepopulate any pagetable entries in non-PAE modes. */
20314-#define PREALLOCATED_PMDS 0
20315+#define PREALLOCATED_PXDS 0
20316
20317 #endif /* CONFIG_X86_PAE */
20318
20319-static void free_pmds(pmd_t *pmds[])
20320+static void free_pxds(pxd_t *pxds[])
20321 {
20322 int i;
20323
20324- for(i = 0; i < PREALLOCATED_PMDS; i++)
20325- if (pmds[i])
20326- free_page((unsigned long)pmds[i]);
20327+ for(i = 0; i < PREALLOCATED_PXDS; i++)
20328+ if (pxds[i])
20329+ free_page((unsigned long)pxds[i]);
20330 }
20331
20332-static int preallocate_pmds(pmd_t *pmds[])
20333+static int preallocate_pxds(pxd_t *pxds[])
20334 {
20335 int i;
20336 bool failed = false;
20337
20338- for(i = 0; i < PREALLOCATED_PMDS; i++) {
20339- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
20340- if (pmd == NULL)
20341+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
20342+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
20343+ if (pxd == NULL)
20344 failed = true;
20345- pmds[i] = pmd;
20346+ pxds[i] = pxd;
20347 }
20348
20349 if (failed) {
20350- free_pmds(pmds);
20351+ free_pxds(pxds);
20352 return -ENOMEM;
20353 }
20354
20355@@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[
20356 * preallocate which never got a corresponding vma will need to be
20357 * freed manually.
20358 */
20359-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
20360+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
20361 {
20362 int i;
20363
20364- for(i = 0; i < PREALLOCATED_PMDS; i++) {
20365+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
20366 pgd_t pgd = pgdp[i];
20367
20368 if (pgd_val(pgd) != 0) {
20369- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
20370+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
20371
20372- pgdp[i] = native_make_pgd(0);
20373+ set_pgd(pgdp + i, native_make_pgd(0));
20374
20375- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
20376- pmd_free(mm, pmd);
20377+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
20378+ pxd_free(mm, pxd);
20379 }
20380 }
20381 }
20382
20383-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
20384+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
20385 {
20386- pud_t *pud;
20387+ pyd_t *pyd;
20388 unsigned long addr;
20389 int i;
20390
20391- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
20392+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
20393 return;
20394
20395- pud = pud_offset(pgd, 0);
20396+#ifdef CONFIG_X86_64
20397+ pyd = pyd_offset(mm, 0L);
20398+#else
20399+ pyd = pyd_offset(pgd, 0L);
20400+#endif
20401
20402- for (addr = i = 0; i < PREALLOCATED_PMDS;
20403- i++, pud++, addr += PUD_SIZE) {
20404- pmd_t *pmd = pmds[i];
20405+ for (addr = i = 0; i < PREALLOCATED_PXDS;
20406+ i++, pyd++, addr += PYD_SIZE) {
20407+ pxd_t *pxd = pxds[i];
20408
20409 if (i >= KERNEL_PGD_BOUNDARY)
20410- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20411- sizeof(pmd_t) * PTRS_PER_PMD);
20412+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20413+ sizeof(pxd_t) * PTRS_PER_PMD);
20414
20415- pud_populate(mm, pud, pmd);
20416+ pyd_populate(mm, pyd, pxd);
20417 }
20418 }
20419
20420 pgd_t *pgd_alloc(struct mm_struct *mm)
20421 {
20422 pgd_t *pgd;
20423- pmd_t *pmds[PREALLOCATED_PMDS];
20424+ pxd_t *pxds[PREALLOCATED_PXDS];
20425
20426 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
20427
20428@@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20429
20430 mm->pgd = pgd;
20431
20432- if (preallocate_pmds(pmds) != 0)
20433+ if (preallocate_pxds(pxds) != 0)
20434 goto out_free_pgd;
20435
20436 if (paravirt_pgd_alloc(mm) != 0)
20437- goto out_free_pmds;
20438+ goto out_free_pxds;
20439
20440 /*
20441 * Make sure that pre-populating the pmds is atomic with
20442@@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20443 spin_lock(&pgd_lock);
20444
20445 pgd_ctor(mm, pgd);
20446- pgd_prepopulate_pmd(mm, pgd, pmds);
20447+ pgd_prepopulate_pxd(mm, pgd, pxds);
20448
20449 spin_unlock(&pgd_lock);
20450
20451 return pgd;
20452
20453-out_free_pmds:
20454- free_pmds(pmds);
20455+out_free_pxds:
20456+ free_pxds(pxds);
20457 out_free_pgd:
20458 free_page((unsigned long)pgd);
20459 out:
20460@@ -295,7 +344,7 @@ out:
20461
20462 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
20463 {
20464- pgd_mop_up_pmds(mm, pgd);
20465+ pgd_mop_up_pxds(mm, pgd);
20466 pgd_dtor(pgd);
20467 paravirt_pgd_free(mm, pgd);
20468 free_page((unsigned long)pgd);
20469diff -urNp linux-3.0.4/arch/x86/mm/setup_nx.c linux-3.0.4/arch/x86/mm/setup_nx.c
20470--- linux-3.0.4/arch/x86/mm/setup_nx.c 2011-07-21 22:17:23.000000000 -0400
20471+++ linux-3.0.4/arch/x86/mm/setup_nx.c 2011-08-23 21:47:55.000000000 -0400
20472@@ -5,8 +5,10 @@
20473 #include <asm/pgtable.h>
20474 #include <asm/proto.h>
20475
20476+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20477 static int disable_nx __cpuinitdata;
20478
20479+#ifndef CONFIG_PAX_PAGEEXEC
20480 /*
20481 * noexec = on|off
20482 *
20483@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
20484 return 0;
20485 }
20486 early_param("noexec", noexec_setup);
20487+#endif
20488+
20489+#endif
20490
20491 void __cpuinit x86_configure_nx(void)
20492 {
20493+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20494 if (cpu_has_nx && !disable_nx)
20495 __supported_pte_mask |= _PAGE_NX;
20496 else
20497+#endif
20498 __supported_pte_mask &= ~_PAGE_NX;
20499 }
20500
20501diff -urNp linux-3.0.4/arch/x86/mm/tlb.c linux-3.0.4/arch/x86/mm/tlb.c
20502--- linux-3.0.4/arch/x86/mm/tlb.c 2011-07-21 22:17:23.000000000 -0400
20503+++ linux-3.0.4/arch/x86/mm/tlb.c 2011-08-23 21:47:55.000000000 -0400
20504@@ -65,7 +65,11 @@ void leave_mm(int cpu)
20505 BUG();
20506 cpumask_clear_cpu(cpu,
20507 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
20508+
20509+#ifndef CONFIG_PAX_PER_CPU_PGD
20510 load_cr3(swapper_pg_dir);
20511+#endif
20512+
20513 }
20514 EXPORT_SYMBOL_GPL(leave_mm);
20515
20516diff -urNp linux-3.0.4/arch/x86/net/bpf_jit_comp.c linux-3.0.4/arch/x86/net/bpf_jit_comp.c
20517--- linux-3.0.4/arch/x86/net/bpf_jit_comp.c 2011-07-21 22:17:23.000000000 -0400
20518+++ linux-3.0.4/arch/x86/net/bpf_jit_comp.c 2011-08-23 21:47:55.000000000 -0400
20519@@ -589,7 +589,9 @@ cond_branch: f_offset = addrs[i + filt
20520 module_free(NULL, image);
20521 return;
20522 }
20523+ pax_open_kernel();
20524 memcpy(image + proglen, temp, ilen);
20525+ pax_close_kernel();
20526 }
20527 proglen += ilen;
20528 addrs[i] = proglen;
20529@@ -609,7 +611,7 @@ cond_branch: f_offset = addrs[i + filt
20530 break;
20531 }
20532 if (proglen == oldproglen) {
20533- image = module_alloc(max_t(unsigned int,
20534+ image = module_alloc_exec(max_t(unsigned int,
20535 proglen,
20536 sizeof(struct work_struct)));
20537 if (!image)
20538diff -urNp linux-3.0.4/arch/x86/oprofile/backtrace.c linux-3.0.4/arch/x86/oprofile/backtrace.c
20539--- linux-3.0.4/arch/x86/oprofile/backtrace.c 2011-09-02 18:11:21.000000000 -0400
20540+++ linux-3.0.4/arch/x86/oprofile/backtrace.c 2011-08-23 21:47:55.000000000 -0400
20541@@ -148,7 +148,7 @@ x86_backtrace(struct pt_regs * const reg
20542 {
20543 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
20544
20545- if (!user_mode_vm(regs)) {
20546+ if (!user_mode(regs)) {
20547 unsigned long stack = kernel_stack_pointer(regs);
20548 if (depth)
20549 dump_trace(NULL, regs, (unsigned long *)stack, 0,
20550diff -urNp linux-3.0.4/arch/x86/pci/mrst.c linux-3.0.4/arch/x86/pci/mrst.c
20551--- linux-3.0.4/arch/x86/pci/mrst.c 2011-07-21 22:17:23.000000000 -0400
20552+++ linux-3.0.4/arch/x86/pci/mrst.c 2011-08-23 21:47:55.000000000 -0400
20553@@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
20554 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
20555 pci_mmcfg_late_init();
20556 pcibios_enable_irq = mrst_pci_irq_enable;
20557- pci_root_ops = pci_mrst_ops;
20558+ pax_open_kernel();
20559+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
20560+ pax_close_kernel();
20561 /* Continue with standard init */
20562 return 1;
20563 }
20564diff -urNp linux-3.0.4/arch/x86/pci/pcbios.c linux-3.0.4/arch/x86/pci/pcbios.c
20565--- linux-3.0.4/arch/x86/pci/pcbios.c 2011-07-21 22:17:23.000000000 -0400
20566+++ linux-3.0.4/arch/x86/pci/pcbios.c 2011-08-23 21:47:55.000000000 -0400
20567@@ -79,50 +79,93 @@ union bios32 {
20568 static struct {
20569 unsigned long address;
20570 unsigned short segment;
20571-} bios32_indirect = { 0, __KERNEL_CS };
20572+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
20573
20574 /*
20575 * Returns the entry point for the given service, NULL on error
20576 */
20577
20578-static unsigned long bios32_service(unsigned long service)
20579+static unsigned long __devinit bios32_service(unsigned long service)
20580 {
20581 unsigned char return_code; /* %al */
20582 unsigned long address; /* %ebx */
20583 unsigned long length; /* %ecx */
20584 unsigned long entry; /* %edx */
20585 unsigned long flags;
20586+ struct desc_struct d, *gdt;
20587
20588 local_irq_save(flags);
20589- __asm__("lcall *(%%edi); cld"
20590+
20591+ gdt = get_cpu_gdt_table(smp_processor_id());
20592+
20593+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
20594+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
20595+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
20596+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
20597+
20598+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
20599 : "=a" (return_code),
20600 "=b" (address),
20601 "=c" (length),
20602 "=d" (entry)
20603 : "0" (service),
20604 "1" (0),
20605- "D" (&bios32_indirect));
20606+ "D" (&bios32_indirect),
20607+ "r"(__PCIBIOS_DS)
20608+ : "memory");
20609+
20610+ pax_open_kernel();
20611+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
20612+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
20613+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
20614+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
20615+ pax_close_kernel();
20616+
20617 local_irq_restore(flags);
20618
20619 switch (return_code) {
20620- case 0:
20621- return address + entry;
20622- case 0x80: /* Not present */
20623- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
20624- return 0;
20625- default: /* Shouldn't happen */
20626- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
20627- service, return_code);
20628+ case 0: {
20629+ int cpu;
20630+ unsigned char flags;
20631+
20632+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
20633+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
20634+ printk(KERN_WARNING "bios32_service: not valid\n");
20635 return 0;
20636+ }
20637+ address = address + PAGE_OFFSET;
20638+ length += 16UL; /* some BIOSs underreport this... */
20639+ flags = 4;
20640+ if (length >= 64*1024*1024) {
20641+ length >>= PAGE_SHIFT;
20642+ flags |= 8;
20643+ }
20644+
20645+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
20646+ gdt = get_cpu_gdt_table(cpu);
20647+ pack_descriptor(&d, address, length, 0x9b, flags);
20648+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
20649+ pack_descriptor(&d, address, length, 0x93, flags);
20650+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
20651+ }
20652+ return entry;
20653+ }
20654+ case 0x80: /* Not present */
20655+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
20656+ return 0;
20657+ default: /* Shouldn't happen */
20658+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
20659+ service, return_code);
20660+ return 0;
20661 }
20662 }
20663
20664 static struct {
20665 unsigned long address;
20666 unsigned short segment;
20667-} pci_indirect = { 0, __KERNEL_CS };
20668+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
20669
20670-static int pci_bios_present;
20671+static int pci_bios_present __read_only;
20672
20673 static int __devinit check_pcibios(void)
20674 {
20675@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
20676 unsigned long flags, pcibios_entry;
20677
20678 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
20679- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
20680+ pci_indirect.address = pcibios_entry;
20681
20682 local_irq_save(flags);
20683- __asm__(
20684- "lcall *(%%edi); cld\n\t"
20685+ __asm__("movw %w6, %%ds\n\t"
20686+ "lcall *%%ss:(%%edi); cld\n\t"
20687+ "push %%ss\n\t"
20688+ "pop %%ds\n\t"
20689 "jc 1f\n\t"
20690 "xor %%ah, %%ah\n"
20691 "1:"
20692@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
20693 "=b" (ebx),
20694 "=c" (ecx)
20695 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
20696- "D" (&pci_indirect)
20697+ "D" (&pci_indirect),
20698+ "r" (__PCIBIOS_DS)
20699 : "memory");
20700 local_irq_restore(flags);
20701
20702@@ -188,7 +234,10 @@ static int pci_bios_read(unsigned int se
20703
20704 switch (len) {
20705 case 1:
20706- __asm__("lcall *(%%esi); cld\n\t"
20707+ __asm__("movw %w6, %%ds\n\t"
20708+ "lcall *%%ss:(%%esi); cld\n\t"
20709+ "push %%ss\n\t"
20710+ "pop %%ds\n\t"
20711 "jc 1f\n\t"
20712 "xor %%ah, %%ah\n"
20713 "1:"
20714@@ -197,7 +246,8 @@ static int pci_bios_read(unsigned int se
20715 : "1" (PCIBIOS_READ_CONFIG_BYTE),
20716 "b" (bx),
20717 "D" ((long)reg),
20718- "S" (&pci_indirect));
20719+ "S" (&pci_indirect),
20720+ "r" (__PCIBIOS_DS));
20721 /*
20722 * Zero-extend the result beyond 8 bits, do not trust the
20723 * BIOS having done it:
20724@@ -205,7 +255,10 @@ static int pci_bios_read(unsigned int se
20725 *value &= 0xff;
20726 break;
20727 case 2:
20728- __asm__("lcall *(%%esi); cld\n\t"
20729+ __asm__("movw %w6, %%ds\n\t"
20730+ "lcall *%%ss:(%%esi); cld\n\t"
20731+ "push %%ss\n\t"
20732+ "pop %%ds\n\t"
20733 "jc 1f\n\t"
20734 "xor %%ah, %%ah\n"
20735 "1:"
20736@@ -214,7 +267,8 @@ static int pci_bios_read(unsigned int se
20737 : "1" (PCIBIOS_READ_CONFIG_WORD),
20738 "b" (bx),
20739 "D" ((long)reg),
20740- "S" (&pci_indirect));
20741+ "S" (&pci_indirect),
20742+ "r" (__PCIBIOS_DS));
20743 /*
20744 * Zero-extend the result beyond 16 bits, do not trust the
20745 * BIOS having done it:
20746@@ -222,7 +276,10 @@ static int pci_bios_read(unsigned int se
20747 *value &= 0xffff;
20748 break;
20749 case 4:
20750- __asm__("lcall *(%%esi); cld\n\t"
20751+ __asm__("movw %w6, %%ds\n\t"
20752+ "lcall *%%ss:(%%esi); cld\n\t"
20753+ "push %%ss\n\t"
20754+ "pop %%ds\n\t"
20755 "jc 1f\n\t"
20756 "xor %%ah, %%ah\n"
20757 "1:"
20758@@ -231,7 +288,8 @@ static int pci_bios_read(unsigned int se
20759 : "1" (PCIBIOS_READ_CONFIG_DWORD),
20760 "b" (bx),
20761 "D" ((long)reg),
20762- "S" (&pci_indirect));
20763+ "S" (&pci_indirect),
20764+ "r" (__PCIBIOS_DS));
20765 break;
20766 }
20767
20768@@ -254,7 +312,10 @@ static int pci_bios_write(unsigned int s
20769
20770 switch (len) {
20771 case 1:
20772- __asm__("lcall *(%%esi); cld\n\t"
20773+ __asm__("movw %w6, %%ds\n\t"
20774+ "lcall *%%ss:(%%esi); cld\n\t"
20775+ "push %%ss\n\t"
20776+ "pop %%ds\n\t"
20777 "jc 1f\n\t"
20778 "xor %%ah, %%ah\n"
20779 "1:"
20780@@ -263,10 +324,14 @@ static int pci_bios_write(unsigned int s
20781 "c" (value),
20782 "b" (bx),
20783 "D" ((long)reg),
20784- "S" (&pci_indirect));
20785+ "S" (&pci_indirect),
20786+ "r" (__PCIBIOS_DS));
20787 break;
20788 case 2:
20789- __asm__("lcall *(%%esi); cld\n\t"
20790+ __asm__("movw %w6, %%ds\n\t"
20791+ "lcall *%%ss:(%%esi); cld\n\t"
20792+ "push %%ss\n\t"
20793+ "pop %%ds\n\t"
20794 "jc 1f\n\t"
20795 "xor %%ah, %%ah\n"
20796 "1:"
20797@@ -275,10 +340,14 @@ static int pci_bios_write(unsigned int s
20798 "c" (value),
20799 "b" (bx),
20800 "D" ((long)reg),
20801- "S" (&pci_indirect));
20802+ "S" (&pci_indirect),
20803+ "r" (__PCIBIOS_DS));
20804 break;
20805 case 4:
20806- __asm__("lcall *(%%esi); cld\n\t"
20807+ __asm__("movw %w6, %%ds\n\t"
20808+ "lcall *%%ss:(%%esi); cld\n\t"
20809+ "push %%ss\n\t"
20810+ "pop %%ds\n\t"
20811 "jc 1f\n\t"
20812 "xor %%ah, %%ah\n"
20813 "1:"
20814@@ -287,7 +356,8 @@ static int pci_bios_write(unsigned int s
20815 "c" (value),
20816 "b" (bx),
20817 "D" ((long)reg),
20818- "S" (&pci_indirect));
20819+ "S" (&pci_indirect),
20820+ "r" (__PCIBIOS_DS));
20821 break;
20822 }
20823
20824@@ -392,10 +462,13 @@ struct irq_routing_table * pcibios_get_i
20825
20826 DBG("PCI: Fetching IRQ routing table... ");
20827 __asm__("push %%es\n\t"
20828+ "movw %w8, %%ds\n\t"
20829 "push %%ds\n\t"
20830 "pop %%es\n\t"
20831- "lcall *(%%esi); cld\n\t"
20832+ "lcall *%%ss:(%%esi); cld\n\t"
20833 "pop %%es\n\t"
20834+ "push %%ss\n\t"
20835+ "pop %%ds\n"
20836 "jc 1f\n\t"
20837 "xor %%ah, %%ah\n"
20838 "1:"
20839@@ -406,7 +479,8 @@ struct irq_routing_table * pcibios_get_i
20840 "1" (0),
20841 "D" ((long) &opt),
20842 "S" (&pci_indirect),
20843- "m" (opt)
20844+ "m" (opt),
20845+ "r" (__PCIBIOS_DS)
20846 : "memory");
20847 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
20848 if (ret & 0xff00)
20849@@ -430,7 +504,10 @@ int pcibios_set_irq_routing(struct pci_d
20850 {
20851 int ret;
20852
20853- __asm__("lcall *(%%esi); cld\n\t"
20854+ __asm__("movw %w5, %%ds\n\t"
20855+ "lcall *%%ss:(%%esi); cld\n\t"
20856+ "push %%ss\n\t"
20857+ "pop %%ds\n"
20858 "jc 1f\n\t"
20859 "xor %%ah, %%ah\n"
20860 "1:"
20861@@ -438,7 +515,8 @@ int pcibios_set_irq_routing(struct pci_d
20862 : "0" (PCIBIOS_SET_PCI_HW_INT),
20863 "b" ((dev->bus->number << 8) | dev->devfn),
20864 "c" ((irq << 8) | (pin + 10)),
20865- "S" (&pci_indirect));
20866+ "S" (&pci_indirect),
20867+ "r" (__PCIBIOS_DS));
20868 return !(ret & 0xff00);
20869 }
20870 EXPORT_SYMBOL(pcibios_set_irq_routing);
20871diff -urNp linux-3.0.4/arch/x86/platform/efi/efi_32.c linux-3.0.4/arch/x86/platform/efi/efi_32.c
20872--- linux-3.0.4/arch/x86/platform/efi/efi_32.c 2011-07-21 22:17:23.000000000 -0400
20873+++ linux-3.0.4/arch/x86/platform/efi/efi_32.c 2011-09-17 00:53:42.000000000 -0400
20874@@ -38,70 +38,56 @@
20875 */
20876
20877 static unsigned long efi_rt_eflags;
20878-static pgd_t efi_bak_pg_dir_pointer[2];
20879+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
20880
20881-void efi_call_phys_prelog(void)
20882+void __init efi_call_phys_prelog(void)
20883 {
20884- unsigned long cr4;
20885- unsigned long temp;
20886 struct desc_ptr gdt_descr;
20887
20888- local_irq_save(efi_rt_eflags);
20889+#ifdef CONFIG_PAX_KERNEXEC
20890+ struct desc_struct d;
20891+#endif
20892
20893- /*
20894- * If I don't have PAE, I should just duplicate two entries in page
20895- * directory. If I have PAE, I just need to duplicate one entry in
20896- * page directory.
20897- */
20898- cr4 = read_cr4_safe();
20899+ local_irq_save(efi_rt_eflags);
20900
20901- if (cr4 & X86_CR4_PAE) {
20902- efi_bak_pg_dir_pointer[0].pgd =
20903- swapper_pg_dir[pgd_index(0)].pgd;
20904- swapper_pg_dir[0].pgd =
20905- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
20906- } else {
20907- efi_bak_pg_dir_pointer[0].pgd =
20908- swapper_pg_dir[pgd_index(0)].pgd;
20909- efi_bak_pg_dir_pointer[1].pgd =
20910- swapper_pg_dir[pgd_index(0x400000)].pgd;
20911- swapper_pg_dir[pgd_index(0)].pgd =
20912- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
20913- temp = PAGE_OFFSET + 0x400000;
20914- swapper_pg_dir[pgd_index(0x400000)].pgd =
20915- swapper_pg_dir[pgd_index(temp)].pgd;
20916- }
20917+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
20918+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20919+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
20920
20921 /*
20922 * After the lock is released, the original page table is restored.
20923 */
20924 __flush_tlb_all();
20925
20926- gdt_descr.address = __pa(get_cpu_gdt_table(0));
20927+#ifdef CONFIG_PAX_KERNEXEC
20928+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
20929+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_EFI_CS, &d, DESCTYPE_S);
20930+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
20931+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_EFI_DS, &d, DESCTYPE_S);
20932+#endif
20933+
20934+ gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
20935 gdt_descr.size = GDT_SIZE - 1;
20936 load_gdt(&gdt_descr);
20937 }
20938
20939-void efi_call_phys_epilog(void)
20940+void __init efi_call_phys_epilog(void)
20941 {
20942- unsigned long cr4;
20943 struct desc_ptr gdt_descr;
20944
20945- gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
20946+#ifdef CONFIG_PAX_KERNEXEC
20947+ struct desc_struct d;
20948+
20949+ memset(&d, 0, sizeof d);
20950+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_EFI_CS, &d, DESCTYPE_S);
20951+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_EFI_DS, &d, DESCTYPE_S);
20952+#endif
20953+
20954+ gdt_descr.address = get_cpu_gdt_table(0);
20955 gdt_descr.size = GDT_SIZE - 1;
20956 load_gdt(&gdt_descr);
20957
20958- cr4 = read_cr4_safe();
20959-
20960- if (cr4 & X86_CR4_PAE) {
20961- swapper_pg_dir[pgd_index(0)].pgd =
20962- efi_bak_pg_dir_pointer[0].pgd;
20963- } else {
20964- swapper_pg_dir[pgd_index(0)].pgd =
20965- efi_bak_pg_dir_pointer[0].pgd;
20966- swapper_pg_dir[pgd_index(0x400000)].pgd =
20967- efi_bak_pg_dir_pointer[1].pgd;
20968- }
20969+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
20970
20971 /*
20972 * After the lock is released, the original page table is restored.
20973diff -urNp linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S
20974--- linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S 2011-07-21 22:17:23.000000000 -0400
20975+++ linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S 2011-09-17 00:53:42.000000000 -0400
20976@@ -6,6 +6,7 @@
20977 */
20978
20979 #include <linux/linkage.h>
20980+#include <linux/init.h>
20981 #include <asm/page_types.h>
20982
20983 /*
20984@@ -20,7 +21,7 @@
20985 * service functions will comply with gcc calling convention, too.
20986 */
20987
20988-.text
20989+__INIT
20990 ENTRY(efi_call_phys)
20991 /*
20992 * 0. The function can only be called in Linux kernel. So CS has been
20993@@ -36,9 +37,11 @@ ENTRY(efi_call_phys)
20994 * The mapping of lower virtual memory has been created in prelog and
20995 * epilog.
20996 */
20997- movl $1f, %edx
20998- subl $__PAGE_OFFSET, %edx
20999- jmp *%edx
21000+ movl $(__KERNEXEC_EFI_DS), %edx
21001+ mov %edx, %ds
21002+ mov %edx, %es
21003+ mov %edx, %ss
21004+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
21005 1:
21006
21007 /*
21008@@ -47,14 +50,8 @@ ENTRY(efi_call_phys)
21009 * parameter 2, ..., param n. To make things easy, we save the return
21010 * address of efi_call_phys in a global variable.
21011 */
21012- popl %edx
21013- movl %edx, saved_return_addr
21014- /* get the function pointer into ECX*/
21015- popl %ecx
21016- movl %ecx, efi_rt_function_ptr
21017- movl $2f, %edx
21018- subl $__PAGE_OFFSET, %edx
21019- pushl %edx
21020+ popl (saved_return_addr)
21021+ popl (efi_rt_function_ptr)
21022
21023 /*
21024 * 3. Clear PG bit in %CR0.
21025@@ -73,9 +70,8 @@ ENTRY(efi_call_phys)
21026 /*
21027 * 5. Call the physical function.
21028 */
21029- jmp *%ecx
21030+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
21031
21032-2:
21033 /*
21034 * 6. After EFI runtime service returns, control will return to
21035 * following instruction. We'd better readjust stack pointer first.
21036@@ -88,35 +84,32 @@ ENTRY(efi_call_phys)
21037 movl %cr0, %edx
21038 orl $0x80000000, %edx
21039 movl %edx, %cr0
21040- jmp 1f
21041-1:
21042+
21043 /*
21044 * 8. Now restore the virtual mode from flat mode by
21045 * adding EIP with PAGE_OFFSET.
21046 */
21047- movl $1f, %edx
21048- jmp *%edx
21049+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
21050 1:
21051+ movl $(__KERNEL_DS), %edx
21052+ mov %edx, %ds
21053+ mov %edx, %es
21054+ mov %edx, %ss
21055
21056 /*
21057 * 9. Balance the stack. And because EAX contain the return value,
21058 * we'd better not clobber it.
21059 */
21060- leal efi_rt_function_ptr, %edx
21061- movl (%edx), %ecx
21062- pushl %ecx
21063+ pushl (efi_rt_function_ptr)
21064
21065 /*
21066- * 10. Push the saved return address onto the stack and return.
21067+ * 10. Return to the saved return address.
21068 */
21069- leal saved_return_addr, %edx
21070- movl (%edx), %ecx
21071- pushl %ecx
21072- ret
21073+ jmpl *(saved_return_addr)
21074 ENDPROC(efi_call_phys)
21075 .previous
21076
21077-.data
21078+__INITDATA
21079 saved_return_addr:
21080 .long 0
21081 efi_rt_function_ptr:
21082diff -urNp linux-3.0.4/arch/x86/platform/mrst/mrst.c linux-3.0.4/arch/x86/platform/mrst/mrst.c
21083--- linux-3.0.4/arch/x86/platform/mrst/mrst.c 2011-07-21 22:17:23.000000000 -0400
21084+++ linux-3.0.4/arch/x86/platform/mrst/mrst.c 2011-08-23 21:47:55.000000000 -0400
21085@@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
21086 }
21087
21088 /* Reboot and power off are handled by the SCU on a MID device */
21089-static void mrst_power_off(void)
21090+static __noreturn void mrst_power_off(void)
21091 {
21092 intel_scu_ipc_simple_command(0xf1, 1);
21093+ BUG();
21094 }
21095
21096-static void mrst_reboot(void)
21097+static __noreturn void mrst_reboot(void)
21098 {
21099 intel_scu_ipc_simple_command(0xf1, 0);
21100+ BUG();
21101 }
21102
21103 /*
21104diff -urNp linux-3.0.4/arch/x86/platform/uv/tlb_uv.c linux-3.0.4/arch/x86/platform/uv/tlb_uv.c
21105--- linux-3.0.4/arch/x86/platform/uv/tlb_uv.c 2011-07-21 22:17:23.000000000 -0400
21106+++ linux-3.0.4/arch/x86/platform/uv/tlb_uv.c 2011-08-23 21:48:14.000000000 -0400
21107@@ -373,6 +373,8 @@ static void reset_with_ipi(struct bau_ta
21108 cpumask_t mask;
21109 struct reset_args reset_args;
21110
21111+ pax_track_stack();
21112+
21113 reset_args.sender = sender;
21114 cpus_clear(mask);
21115 /* find a single cpu for each uvhub in this distribution mask */
21116diff -urNp linux-3.0.4/arch/x86/power/cpu.c linux-3.0.4/arch/x86/power/cpu.c
21117--- linux-3.0.4/arch/x86/power/cpu.c 2011-07-21 22:17:23.000000000 -0400
21118+++ linux-3.0.4/arch/x86/power/cpu.c 2011-08-23 21:47:55.000000000 -0400
21119@@ -130,7 +130,7 @@ static void do_fpu_end(void)
21120 static void fix_processor_context(void)
21121 {
21122 int cpu = smp_processor_id();
21123- struct tss_struct *t = &per_cpu(init_tss, cpu);
21124+ struct tss_struct *t = init_tss + cpu;
21125
21126 set_tss_desc(cpu, t); /*
21127 * This just modifies memory; should not be
21128@@ -140,7 +140,9 @@ static void fix_processor_context(void)
21129 */
21130
21131 #ifdef CONFIG_X86_64
21132+ pax_open_kernel();
21133 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
21134+ pax_close_kernel();
21135
21136 syscall_init(); /* This sets MSR_*STAR and related */
21137 #endif
21138diff -urNp linux-3.0.4/arch/x86/vdso/Makefile linux-3.0.4/arch/x86/vdso/Makefile
21139--- linux-3.0.4/arch/x86/vdso/Makefile 2011-07-21 22:17:23.000000000 -0400
21140+++ linux-3.0.4/arch/x86/vdso/Makefile 2011-08-23 21:47:55.000000000 -0400
21141@@ -136,7 +136,7 @@ quiet_cmd_vdso = VDSO $@
21142 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
21143 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
21144
21145-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21146+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21147 GCOV_PROFILE := n
21148
21149 #
21150diff -urNp linux-3.0.4/arch/x86/vdso/vdso32-setup.c linux-3.0.4/arch/x86/vdso/vdso32-setup.c
21151--- linux-3.0.4/arch/x86/vdso/vdso32-setup.c 2011-07-21 22:17:23.000000000 -0400
21152+++ linux-3.0.4/arch/x86/vdso/vdso32-setup.c 2011-08-23 21:47:55.000000000 -0400
21153@@ -25,6 +25,7 @@
21154 #include <asm/tlbflush.h>
21155 #include <asm/vdso.h>
21156 #include <asm/proto.h>
21157+#include <asm/mman.h>
21158
21159 enum {
21160 VDSO_DISABLED = 0,
21161@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
21162 void enable_sep_cpu(void)
21163 {
21164 int cpu = get_cpu();
21165- struct tss_struct *tss = &per_cpu(init_tss, cpu);
21166+ struct tss_struct *tss = init_tss + cpu;
21167
21168 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21169 put_cpu();
21170@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
21171 gate_vma.vm_start = FIXADDR_USER_START;
21172 gate_vma.vm_end = FIXADDR_USER_END;
21173 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
21174- gate_vma.vm_page_prot = __P101;
21175+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
21176 /*
21177 * Make sure the vDSO gets into every core dump.
21178 * Dumping its contents makes post-mortem fully interpretable later
21179@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
21180 if (compat)
21181 addr = VDSO_HIGH_BASE;
21182 else {
21183- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
21184+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
21185 if (IS_ERR_VALUE(addr)) {
21186 ret = addr;
21187 goto up_fail;
21188 }
21189 }
21190
21191- current->mm->context.vdso = (void *)addr;
21192+ current->mm->context.vdso = addr;
21193
21194 if (compat_uses_vma || !compat) {
21195 /*
21196@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
21197 }
21198
21199 current_thread_info()->sysenter_return =
21200- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21201+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21202
21203 up_fail:
21204 if (ret)
21205- current->mm->context.vdso = NULL;
21206+ current->mm->context.vdso = 0;
21207
21208 up_write(&mm->mmap_sem);
21209
21210@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
21211
21212 const char *arch_vma_name(struct vm_area_struct *vma)
21213 {
21214- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21215+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21216 return "[vdso]";
21217+
21218+#ifdef CONFIG_PAX_SEGMEXEC
21219+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
21220+ return "[vdso]";
21221+#endif
21222+
21223 return NULL;
21224 }
21225
21226@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
21227 * Check to see if the corresponding task was created in compat vdso
21228 * mode.
21229 */
21230- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
21231+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
21232 return &gate_vma;
21233 return NULL;
21234 }
21235diff -urNp linux-3.0.4/arch/x86/vdso/vma.c linux-3.0.4/arch/x86/vdso/vma.c
21236--- linux-3.0.4/arch/x86/vdso/vma.c 2011-07-21 22:17:23.000000000 -0400
21237+++ linux-3.0.4/arch/x86/vdso/vma.c 2011-08-23 21:47:55.000000000 -0400
21238@@ -15,18 +15,19 @@
21239 #include <asm/proto.h>
21240 #include <asm/vdso.h>
21241
21242-unsigned int __read_mostly vdso_enabled = 1;
21243-
21244 extern char vdso_start[], vdso_end[];
21245 extern unsigned short vdso_sync_cpuid;
21246+extern char __vsyscall_0;
21247
21248 static struct page **vdso_pages;
21249+static struct page *vsyscall_page;
21250 static unsigned vdso_size;
21251
21252 static int __init init_vdso_vars(void)
21253 {
21254- int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
21255- int i;
21256+ size_t nbytes = vdso_end - vdso_start;
21257+ size_t npages = (nbytes + PAGE_SIZE - 1) / PAGE_SIZE;
21258+ size_t i;
21259
21260 vdso_size = npages << PAGE_SHIFT;
21261 vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
21262@@ -34,19 +35,19 @@ static int __init init_vdso_vars(void)
21263 goto oom;
21264 for (i = 0; i < npages; i++) {
21265 struct page *p;
21266- p = alloc_page(GFP_KERNEL);
21267+ p = alloc_page(GFP_KERNEL | __GFP_ZERO);
21268 if (!p)
21269 goto oom;
21270 vdso_pages[i] = p;
21271- copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
21272+ memcpy(page_address(p), vdso_start + i*PAGE_SIZE, nbytes > PAGE_SIZE ? PAGE_SIZE : nbytes);
21273+ nbytes -= PAGE_SIZE;
21274 }
21275+ vsyscall_page = pfn_to_page((__pa_symbol(&__vsyscall_0)) >> PAGE_SHIFT);
21276
21277 return 0;
21278
21279 oom:
21280- printk("Cannot allocate vdso\n");
21281- vdso_enabled = 0;
21282- return -ENOMEM;
21283+ panic("Cannot allocate vdso\n");
21284 }
21285 subsys_initcall(init_vdso_vars);
21286
21287@@ -80,37 +81,35 @@ int arch_setup_additional_pages(struct l
21288 unsigned long addr;
21289 int ret;
21290
21291- if (!vdso_enabled)
21292- return 0;
21293-
21294 down_write(&mm->mmap_sem);
21295- addr = vdso_addr(mm->start_stack, vdso_size);
21296- addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
21297+ addr = vdso_addr(mm->start_stack, vdso_size + PAGE_SIZE);
21298+ addr = get_unmapped_area(NULL, addr, vdso_size + PAGE_SIZE, 0, 0);
21299 if (IS_ERR_VALUE(addr)) {
21300 ret = addr;
21301 goto up_fail;
21302 }
21303
21304- current->mm->context.vdso = (void *)addr;
21305+ mm->context.vdso = addr + PAGE_SIZE;
21306
21307- ret = install_special_mapping(mm, addr, vdso_size,
21308+ ret = install_special_mapping(mm, addr, PAGE_SIZE,
21309 VM_READ|VM_EXEC|
21310- VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
21311+ VM_MAYREAD|VM_MAYEXEC|
21312 VM_ALWAYSDUMP,
21313- vdso_pages);
21314+ &vsyscall_page);
21315 if (ret) {
21316- current->mm->context.vdso = NULL;
21317+ mm->context.vdso = 0;
21318 goto up_fail;
21319 }
21320
21321+ ret = install_special_mapping(mm, addr + PAGE_SIZE, vdso_size,
21322+ VM_READ|VM_EXEC|
21323+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
21324+ VM_ALWAYSDUMP,
21325+ vdso_pages);
21326+ if (ret)
21327+ mm->context.vdso = 0;
21328+
21329 up_fail:
21330 up_write(&mm->mmap_sem);
21331 return ret;
21332 }
21333-
21334-static __init int vdso_setup(char *s)
21335-{
21336- vdso_enabled = simple_strtoul(s, NULL, 0);
21337- return 0;
21338-}
21339-__setup("vdso=", vdso_setup);
21340diff -urNp linux-3.0.4/arch/x86/xen/enlighten.c linux-3.0.4/arch/x86/xen/enlighten.c
21341--- linux-3.0.4/arch/x86/xen/enlighten.c 2011-09-02 18:11:26.000000000 -0400
21342+++ linux-3.0.4/arch/x86/xen/enlighten.c 2011-08-29 23:26:21.000000000 -0400
21343@@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
21344
21345 struct shared_info xen_dummy_shared_info;
21346
21347-void *xen_initial_gdt;
21348-
21349 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
21350 __read_mostly int xen_have_vector_callback;
21351 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
21352@@ -1010,7 +1008,7 @@ static const struct pv_apic_ops xen_apic
21353 #endif
21354 };
21355
21356-static void xen_reboot(int reason)
21357+static __noreturn void xen_reboot(int reason)
21358 {
21359 struct sched_shutdown r = { .reason = reason };
21360
21361@@ -1018,17 +1016,17 @@ static void xen_reboot(int reason)
21362 BUG();
21363 }
21364
21365-static void xen_restart(char *msg)
21366+static __noreturn void xen_restart(char *msg)
21367 {
21368 xen_reboot(SHUTDOWN_reboot);
21369 }
21370
21371-static void xen_emergency_restart(void)
21372+static __noreturn void xen_emergency_restart(void)
21373 {
21374 xen_reboot(SHUTDOWN_reboot);
21375 }
21376
21377-static void xen_machine_halt(void)
21378+static __noreturn void xen_machine_halt(void)
21379 {
21380 xen_reboot(SHUTDOWN_poweroff);
21381 }
21382@@ -1134,7 +1132,17 @@ asmlinkage void __init xen_start_kernel(
21383 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
21384
21385 /* Work out if we support NX */
21386- x86_configure_nx();
21387+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21388+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
21389+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
21390+ unsigned l, h;
21391+
21392+ __supported_pte_mask |= _PAGE_NX;
21393+ rdmsr(MSR_EFER, l, h);
21394+ l |= EFER_NX;
21395+ wrmsr(MSR_EFER, l, h);
21396+ }
21397+#endif
21398
21399 xen_setup_features();
21400
21401@@ -1165,13 +1173,6 @@ asmlinkage void __init xen_start_kernel(
21402
21403 machine_ops = xen_machine_ops;
21404
21405- /*
21406- * The only reliable way to retain the initial address of the
21407- * percpu gdt_page is to remember it here, so we can go and
21408- * mark it RW later, when the initial percpu area is freed.
21409- */
21410- xen_initial_gdt = &per_cpu(gdt_page, 0);
21411-
21412 xen_smp_init();
21413
21414 #ifdef CONFIG_ACPI_NUMA
21415diff -urNp linux-3.0.4/arch/x86/xen/mmu.c linux-3.0.4/arch/x86/xen/mmu.c
21416--- linux-3.0.4/arch/x86/xen/mmu.c 2011-09-02 18:11:26.000000000 -0400
21417+++ linux-3.0.4/arch/x86/xen/mmu.c 2011-08-29 23:26:21.000000000 -0400
21418@@ -1683,6 +1683,8 @@ pgd_t * __init xen_setup_kernel_pagetabl
21419 convert_pfn_mfn(init_level4_pgt);
21420 convert_pfn_mfn(level3_ident_pgt);
21421 convert_pfn_mfn(level3_kernel_pgt);
21422+ convert_pfn_mfn(level3_vmalloc_pgt);
21423+ convert_pfn_mfn(level3_vmemmap_pgt);
21424
21425 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
21426 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
21427@@ -1701,7 +1703,10 @@ pgd_t * __init xen_setup_kernel_pagetabl
21428 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
21429 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
21430 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
21431+ set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
21432+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
21433 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
21434+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
21435 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
21436 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
21437
21438@@ -1913,6 +1918,7 @@ static void __init xen_post_allocator_in
21439 pv_mmu_ops.set_pud = xen_set_pud;
21440 #if PAGETABLE_LEVELS == 4
21441 pv_mmu_ops.set_pgd = xen_set_pgd;
21442+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
21443 #endif
21444
21445 /* This will work as long as patching hasn't happened yet
21446@@ -1994,6 +2000,7 @@ static const struct pv_mmu_ops xen_mmu_o
21447 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
21448 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
21449 .set_pgd = xen_set_pgd_hyper,
21450+ .set_pgd_batched = xen_set_pgd_hyper,
21451
21452 .alloc_pud = xen_alloc_pmd_init,
21453 .release_pud = xen_release_pmd_init,
21454diff -urNp linux-3.0.4/arch/x86/xen/smp.c linux-3.0.4/arch/x86/xen/smp.c
21455--- linux-3.0.4/arch/x86/xen/smp.c 2011-09-02 18:11:26.000000000 -0400
21456+++ linux-3.0.4/arch/x86/xen/smp.c 2011-08-29 23:26:21.000000000 -0400
21457@@ -193,11 +193,6 @@ static void __init xen_smp_prepare_boot_
21458 {
21459 BUG_ON(smp_processor_id() != 0);
21460 native_smp_prepare_boot_cpu();
21461-
21462- /* We've switched to the "real" per-cpu gdt, so make sure the
21463- old memory can be recycled */
21464- make_lowmem_page_readwrite(xen_initial_gdt);
21465-
21466 xen_filter_cpu_maps();
21467 xen_setup_vcpu_info_placement();
21468 }
21469@@ -265,12 +260,12 @@ cpu_initialize_context(unsigned int cpu,
21470 gdt = get_cpu_gdt_table(cpu);
21471
21472 ctxt->flags = VGCF_IN_KERNEL;
21473- ctxt->user_regs.ds = __USER_DS;
21474- ctxt->user_regs.es = __USER_DS;
21475+ ctxt->user_regs.ds = __KERNEL_DS;
21476+ ctxt->user_regs.es = __KERNEL_DS;
21477 ctxt->user_regs.ss = __KERNEL_DS;
21478 #ifdef CONFIG_X86_32
21479 ctxt->user_regs.fs = __KERNEL_PERCPU;
21480- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
21481+ savesegment(gs, ctxt->user_regs.gs);
21482 #else
21483 ctxt->gs_base_kernel = per_cpu_offset(cpu);
21484 #endif
21485@@ -321,13 +316,12 @@ static int __cpuinit xen_cpu_up(unsigned
21486 int rc;
21487
21488 per_cpu(current_task, cpu) = idle;
21489+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
21490 #ifdef CONFIG_X86_32
21491 irq_ctx_init(cpu);
21492 #else
21493 clear_tsk_thread_flag(idle, TIF_FORK);
21494- per_cpu(kernel_stack, cpu) =
21495- (unsigned long)task_stack_page(idle) -
21496- KERNEL_STACK_OFFSET + THREAD_SIZE;
21497+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
21498 #endif
21499 xen_setup_runstate_info(cpu);
21500 xen_setup_timer(cpu);
21501diff -urNp linux-3.0.4/arch/x86/xen/xen-asm_32.S linux-3.0.4/arch/x86/xen/xen-asm_32.S
21502--- linux-3.0.4/arch/x86/xen/xen-asm_32.S 2011-07-21 22:17:23.000000000 -0400
21503+++ linux-3.0.4/arch/x86/xen/xen-asm_32.S 2011-08-23 21:47:55.000000000 -0400
21504@@ -83,14 +83,14 @@ ENTRY(xen_iret)
21505 ESP_OFFSET=4 # bytes pushed onto stack
21506
21507 /*
21508- * Store vcpu_info pointer for easy access. Do it this way to
21509- * avoid having to reload %fs
21510+ * Store vcpu_info pointer for easy access.
21511 */
21512 #ifdef CONFIG_SMP
21513- GET_THREAD_INFO(%eax)
21514- movl TI_cpu(%eax), %eax
21515- movl __per_cpu_offset(,%eax,4), %eax
21516- mov xen_vcpu(%eax), %eax
21517+ push %fs
21518+ mov $(__KERNEL_PERCPU), %eax
21519+ mov %eax, %fs
21520+ mov PER_CPU_VAR(xen_vcpu), %eax
21521+ pop %fs
21522 #else
21523 movl xen_vcpu, %eax
21524 #endif
21525diff -urNp linux-3.0.4/arch/x86/xen/xen-head.S linux-3.0.4/arch/x86/xen/xen-head.S
21526--- linux-3.0.4/arch/x86/xen/xen-head.S 2011-07-21 22:17:23.000000000 -0400
21527+++ linux-3.0.4/arch/x86/xen/xen-head.S 2011-08-23 21:47:55.000000000 -0400
21528@@ -19,6 +19,17 @@ ENTRY(startup_xen)
21529 #ifdef CONFIG_X86_32
21530 mov %esi,xen_start_info
21531 mov $init_thread_union+THREAD_SIZE,%esp
21532+#ifdef CONFIG_SMP
21533+ movl $cpu_gdt_table,%edi
21534+ movl $__per_cpu_load,%eax
21535+ movw %ax,__KERNEL_PERCPU + 2(%edi)
21536+ rorl $16,%eax
21537+ movb %al,__KERNEL_PERCPU + 4(%edi)
21538+ movb %ah,__KERNEL_PERCPU + 7(%edi)
21539+ movl $__per_cpu_end - 1,%eax
21540+ subl $__per_cpu_start,%eax
21541+ movw %ax,__KERNEL_PERCPU + 0(%edi)
21542+#endif
21543 #else
21544 mov %rsi,xen_start_info
21545 mov $init_thread_union+THREAD_SIZE,%rsp
21546diff -urNp linux-3.0.4/arch/x86/xen/xen-ops.h linux-3.0.4/arch/x86/xen/xen-ops.h
21547--- linux-3.0.4/arch/x86/xen/xen-ops.h 2011-09-02 18:11:21.000000000 -0400
21548+++ linux-3.0.4/arch/x86/xen/xen-ops.h 2011-08-23 21:47:55.000000000 -0400
21549@@ -10,8 +10,6 @@
21550 extern const char xen_hypervisor_callback[];
21551 extern const char xen_failsafe_callback[];
21552
21553-extern void *xen_initial_gdt;
21554-
21555 struct trap_info;
21556 void xen_copy_trap_info(struct trap_info *traps);
21557
21558diff -urNp linux-3.0.4/block/blk-iopoll.c linux-3.0.4/block/blk-iopoll.c
21559--- linux-3.0.4/block/blk-iopoll.c 2011-07-21 22:17:23.000000000 -0400
21560+++ linux-3.0.4/block/blk-iopoll.c 2011-08-23 21:47:55.000000000 -0400
21561@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
21562 }
21563 EXPORT_SYMBOL(blk_iopoll_complete);
21564
21565-static void blk_iopoll_softirq(struct softirq_action *h)
21566+static void blk_iopoll_softirq(void)
21567 {
21568 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
21569 int rearm = 0, budget = blk_iopoll_budget;
21570diff -urNp linux-3.0.4/block/blk-map.c linux-3.0.4/block/blk-map.c
21571--- linux-3.0.4/block/blk-map.c 2011-07-21 22:17:23.000000000 -0400
21572+++ linux-3.0.4/block/blk-map.c 2011-08-23 21:47:55.000000000 -0400
21573@@ -301,7 +301,7 @@ int blk_rq_map_kern(struct request_queue
21574 if (!len || !kbuf)
21575 return -EINVAL;
21576
21577- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
21578+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
21579 if (do_copy)
21580 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
21581 else
21582diff -urNp linux-3.0.4/block/blk-softirq.c linux-3.0.4/block/blk-softirq.c
21583--- linux-3.0.4/block/blk-softirq.c 2011-07-21 22:17:23.000000000 -0400
21584+++ linux-3.0.4/block/blk-softirq.c 2011-08-23 21:47:55.000000000 -0400
21585@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
21586 * Softirq action handler - move entries to local list and loop over them
21587 * while passing them to the queue registered handler.
21588 */
21589-static void blk_done_softirq(struct softirq_action *h)
21590+static void blk_done_softirq(void)
21591 {
21592 struct list_head *cpu_list, local_list;
21593
21594diff -urNp linux-3.0.4/block/bsg.c linux-3.0.4/block/bsg.c
21595--- linux-3.0.4/block/bsg.c 2011-07-21 22:17:23.000000000 -0400
21596+++ linux-3.0.4/block/bsg.c 2011-08-23 21:47:55.000000000 -0400
21597@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
21598 struct sg_io_v4 *hdr, struct bsg_device *bd,
21599 fmode_t has_write_perm)
21600 {
21601+ unsigned char tmpcmd[sizeof(rq->__cmd)];
21602+ unsigned char *cmdptr;
21603+
21604 if (hdr->request_len > BLK_MAX_CDB) {
21605 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
21606 if (!rq->cmd)
21607 return -ENOMEM;
21608- }
21609+ cmdptr = rq->cmd;
21610+ } else
21611+ cmdptr = tmpcmd;
21612
21613- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
21614+ if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
21615 hdr->request_len))
21616 return -EFAULT;
21617
21618+ if (cmdptr != rq->cmd)
21619+ memcpy(rq->cmd, cmdptr, hdr->request_len);
21620+
21621 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
21622 if (blk_verify_command(rq->cmd, has_write_perm))
21623 return -EPERM;
21624diff -urNp linux-3.0.4/block/scsi_ioctl.c linux-3.0.4/block/scsi_ioctl.c
21625--- linux-3.0.4/block/scsi_ioctl.c 2011-07-21 22:17:23.000000000 -0400
21626+++ linux-3.0.4/block/scsi_ioctl.c 2011-08-23 21:47:55.000000000 -0400
21627@@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
21628 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
21629 struct sg_io_hdr *hdr, fmode_t mode)
21630 {
21631- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
21632+ unsigned char tmpcmd[sizeof(rq->__cmd)];
21633+ unsigned char *cmdptr;
21634+
21635+ if (rq->cmd != rq->__cmd)
21636+ cmdptr = rq->cmd;
21637+ else
21638+ cmdptr = tmpcmd;
21639+
21640+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
21641 return -EFAULT;
21642+
21643+ if (cmdptr != rq->cmd)
21644+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
21645+
21646 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
21647 return -EPERM;
21648
21649@@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *
21650 int err;
21651 unsigned int in_len, out_len, bytes, opcode, cmdlen;
21652 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
21653+ unsigned char tmpcmd[sizeof(rq->__cmd)];
21654+ unsigned char *cmdptr;
21655
21656 if (!sic)
21657 return -EINVAL;
21658@@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *
21659 */
21660 err = -EFAULT;
21661 rq->cmd_len = cmdlen;
21662- if (copy_from_user(rq->cmd, sic->data, cmdlen))
21663+
21664+ if (rq->cmd != rq->__cmd)
21665+ cmdptr = rq->cmd;
21666+ else
21667+ cmdptr = tmpcmd;
21668+
21669+ if (copy_from_user(cmdptr, sic->data, cmdlen))
21670 goto error;
21671
21672+ if (rq->cmd != cmdptr)
21673+ memcpy(rq->cmd, cmdptr, cmdlen);
21674+
21675 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
21676 goto error;
21677
21678diff -urNp linux-3.0.4/crypto/cryptd.c linux-3.0.4/crypto/cryptd.c
21679--- linux-3.0.4/crypto/cryptd.c 2011-07-21 22:17:23.000000000 -0400
21680+++ linux-3.0.4/crypto/cryptd.c 2011-08-23 21:47:55.000000000 -0400
21681@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
21682
21683 struct cryptd_blkcipher_request_ctx {
21684 crypto_completion_t complete;
21685-};
21686+} __no_const;
21687
21688 struct cryptd_hash_ctx {
21689 struct crypto_shash *child;
21690@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
21691
21692 struct cryptd_aead_request_ctx {
21693 crypto_completion_t complete;
21694-};
21695+} __no_const;
21696
21697 static void cryptd_queue_worker(struct work_struct *work);
21698
21699diff -urNp linux-3.0.4/crypto/gf128mul.c linux-3.0.4/crypto/gf128mul.c
21700--- linux-3.0.4/crypto/gf128mul.c 2011-07-21 22:17:23.000000000 -0400
21701+++ linux-3.0.4/crypto/gf128mul.c 2011-08-23 21:47:55.000000000 -0400
21702@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
21703 for (i = 0; i < 7; ++i)
21704 gf128mul_x_lle(&p[i + 1], &p[i]);
21705
21706- memset(r, 0, sizeof(r));
21707+ memset(r, 0, sizeof(*r));
21708 for (i = 0;;) {
21709 u8 ch = ((u8 *)b)[15 - i];
21710
21711@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
21712 for (i = 0; i < 7; ++i)
21713 gf128mul_x_bbe(&p[i + 1], &p[i]);
21714
21715- memset(r, 0, sizeof(r));
21716+ memset(r, 0, sizeof(*r));
21717 for (i = 0;;) {
21718 u8 ch = ((u8 *)b)[i];
21719
21720diff -urNp linux-3.0.4/crypto/serpent.c linux-3.0.4/crypto/serpent.c
21721--- linux-3.0.4/crypto/serpent.c 2011-07-21 22:17:23.000000000 -0400
21722+++ linux-3.0.4/crypto/serpent.c 2011-08-23 21:48:14.000000000 -0400
21723@@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
21724 u32 r0,r1,r2,r3,r4;
21725 int i;
21726
21727+ pax_track_stack();
21728+
21729 /* Copy key, add padding */
21730
21731 for (i = 0; i < keylen; ++i)
21732diff -urNp linux-3.0.4/Documentation/dontdiff linux-3.0.4/Documentation/dontdiff
21733--- linux-3.0.4/Documentation/dontdiff 2011-07-21 22:17:23.000000000 -0400
21734+++ linux-3.0.4/Documentation/dontdiff 2011-08-23 21:47:55.000000000 -0400
21735@@ -5,6 +5,7 @@
21736 *.cis
21737 *.cpio
21738 *.csp
21739+*.dbg
21740 *.dsp
21741 *.dvi
21742 *.elf
21743@@ -48,9 +49,11 @@
21744 *.tab.h
21745 *.tex
21746 *.ver
21747+*.vim
21748 *.xml
21749 *.xz
21750 *_MODULES
21751+*_reg_safe.h
21752 *_vga16.c
21753 *~
21754 \#*#
21755@@ -70,6 +73,7 @@ Kerntypes
21756 Module.markers
21757 Module.symvers
21758 PENDING
21759+PERF*
21760 SCCS
21761 System.map*
21762 TAGS
21763@@ -98,6 +102,8 @@ bzImage*
21764 capability_names.h
21765 capflags.c
21766 classlist.h*
21767+clut_vga16.c
21768+common-cmds.h
21769 comp*.log
21770 compile.h*
21771 conf
21772@@ -126,12 +132,14 @@ fore200e_pca_fw.c*
21773 gconf
21774 gconf.glade.h
21775 gen-devlist
21776+gen-kdb_cmds.c
21777 gen_crc32table
21778 gen_init_cpio
21779 generated
21780 genheaders
21781 genksyms
21782 *_gray256.c
21783+hash
21784 hpet_example
21785 hugepage-mmap
21786 hugepage-shm
21787@@ -146,7 +154,6 @@ int32.c
21788 int4.c
21789 int8.c
21790 kallsyms
21791-kconfig
21792 keywords.c
21793 ksym.c*
21794 ksym.h*
21795@@ -154,7 +161,6 @@ kxgettext
21796 lkc_defs.h
21797 lex.c
21798 lex.*.c
21799-linux
21800 logo_*.c
21801 logo_*_clut224.c
21802 logo_*_mono.c
21803@@ -174,6 +180,7 @@ mkboot
21804 mkbugboot
21805 mkcpustr
21806 mkdep
21807+mkpiggy
21808 mkprep
21809 mkregtable
21810 mktables
21811@@ -209,6 +216,7 @@ r300_reg_safe.h
21812 r420_reg_safe.h
21813 r600_reg_safe.h
21814 recordmcount
21815+regdb.c
21816 relocs
21817 rlim_names.h
21818 rn50_reg_safe.h
21819@@ -219,6 +227,7 @@ setup
21820 setup.bin
21821 setup.elf
21822 sImage
21823+slabinfo
21824 sm_tbl*
21825 split-include
21826 syscalltab.h
21827@@ -246,7 +255,9 @@ vmlinux
21828 vmlinux-*
21829 vmlinux.aout
21830 vmlinux.bin.all
21831+vmlinux.bin.bz2
21832 vmlinux.lds
21833+vmlinux.relocs
21834 vmlinuz
21835 voffset.h
21836 vsyscall.lds
21837@@ -254,6 +265,7 @@ vsyscall_32.lds
21838 wanxlfw.inc
21839 uImage
21840 unifdef
21841+utsrelease.h
21842 wakeup.bin
21843 wakeup.elf
21844 wakeup.lds
21845diff -urNp linux-3.0.4/Documentation/kernel-parameters.txt linux-3.0.4/Documentation/kernel-parameters.txt
21846--- linux-3.0.4/Documentation/kernel-parameters.txt 2011-07-21 22:17:23.000000000 -0400
21847+++ linux-3.0.4/Documentation/kernel-parameters.txt 2011-08-23 21:47:55.000000000 -0400
21848@@ -1883,6 +1883,13 @@ bytes respectively. Such letter suffixes
21849 the specified number of seconds. This is to be used if
21850 your oopses keep scrolling off the screen.
21851
21852+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
21853+ virtualization environments that don't cope well with the
21854+ expand down segment used by UDEREF on X86-32 or the frequent
21855+ page table updates on X86-64.
21856+
21857+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
21858+
21859 pcbit= [HW,ISDN]
21860
21861 pcd. [PARIDE]
21862diff -urNp linux-3.0.4/drivers/acpi/apei/cper.c linux-3.0.4/drivers/acpi/apei/cper.c
21863--- linux-3.0.4/drivers/acpi/apei/cper.c 2011-07-21 22:17:23.000000000 -0400
21864+++ linux-3.0.4/drivers/acpi/apei/cper.c 2011-08-23 21:47:55.000000000 -0400
21865@@ -38,12 +38,12 @@
21866 */
21867 u64 cper_next_record_id(void)
21868 {
21869- static atomic64_t seq;
21870+ static atomic64_unchecked_t seq;
21871
21872- if (!atomic64_read(&seq))
21873- atomic64_set(&seq, ((u64)get_seconds()) << 32);
21874+ if (!atomic64_read_unchecked(&seq))
21875+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
21876
21877- return atomic64_inc_return(&seq);
21878+ return atomic64_inc_return_unchecked(&seq);
21879 }
21880 EXPORT_SYMBOL_GPL(cper_next_record_id);
21881
21882diff -urNp linux-3.0.4/drivers/acpi/ec_sys.c linux-3.0.4/drivers/acpi/ec_sys.c
21883--- linux-3.0.4/drivers/acpi/ec_sys.c 2011-07-21 22:17:23.000000000 -0400
21884+++ linux-3.0.4/drivers/acpi/ec_sys.c 2011-08-24 19:06:55.000000000 -0400
21885@@ -11,6 +11,7 @@
21886 #include <linux/kernel.h>
21887 #include <linux/acpi.h>
21888 #include <linux/debugfs.h>
21889+#include <asm/uaccess.h>
21890 #include "internal.h"
21891
21892 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
21893@@ -39,7 +40,7 @@ static ssize_t acpi_ec_read_io(struct fi
21894 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
21895 */
21896 unsigned int size = EC_SPACE_SIZE;
21897- u8 *data = (u8 *) buf;
21898+ u8 data;
21899 loff_t init_off = *off;
21900 int err = 0;
21901
21902@@ -52,9 +53,11 @@ static ssize_t acpi_ec_read_io(struct fi
21903 size = count;
21904
21905 while (size) {
21906- err = ec_read(*off, &data[*off - init_off]);
21907+ err = ec_read(*off, &data);
21908 if (err)
21909 return err;
21910+ if (put_user(data, &buf[*off - init_off]))
21911+ return -EFAULT;
21912 *off += 1;
21913 size--;
21914 }
21915@@ -70,7 +73,6 @@ static ssize_t acpi_ec_write_io(struct f
21916
21917 unsigned int size = count;
21918 loff_t init_off = *off;
21919- u8 *data = (u8 *) buf;
21920 int err = 0;
21921
21922 if (*off >= EC_SPACE_SIZE)
21923@@ -81,7 +83,9 @@ static ssize_t acpi_ec_write_io(struct f
21924 }
21925
21926 while (size) {
21927- u8 byte_write = data[*off - init_off];
21928+ u8 byte_write;
21929+ if (get_user(byte_write, &buf[*off - init_off]))
21930+ return -EFAULT;
21931 err = ec_write(*off, byte_write);
21932 if (err)
21933 return err;
21934diff -urNp linux-3.0.4/drivers/acpi/proc.c linux-3.0.4/drivers/acpi/proc.c
21935--- linux-3.0.4/drivers/acpi/proc.c 2011-07-21 22:17:23.000000000 -0400
21936+++ linux-3.0.4/drivers/acpi/proc.c 2011-08-23 21:47:55.000000000 -0400
21937@@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct f
21938 size_t count, loff_t * ppos)
21939 {
21940 struct list_head *node, *next;
21941- char strbuf[5];
21942- char str[5] = "";
21943- unsigned int len = count;
21944-
21945- if (len > 4)
21946- len = 4;
21947- if (len < 0)
21948- return -EFAULT;
21949+ char strbuf[5] = {0};
21950
21951- if (copy_from_user(strbuf, buffer, len))
21952+ if (count > 4)
21953+ count = 4;
21954+ if (copy_from_user(strbuf, buffer, count))
21955 return -EFAULT;
21956- strbuf[len] = '\0';
21957- sscanf(strbuf, "%s", str);
21958+ strbuf[count] = '\0';
21959
21960 mutex_lock(&acpi_device_lock);
21961 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
21962@@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct f
21963 if (!dev->wakeup.flags.valid)
21964 continue;
21965
21966- if (!strncmp(dev->pnp.bus_id, str, 4)) {
21967+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
21968 if (device_can_wakeup(&dev->dev)) {
21969 bool enable = !device_may_wakeup(&dev->dev);
21970 device_set_wakeup_enable(&dev->dev, enable);
21971diff -urNp linux-3.0.4/drivers/acpi/processor_driver.c linux-3.0.4/drivers/acpi/processor_driver.c
21972--- linux-3.0.4/drivers/acpi/processor_driver.c 2011-07-21 22:17:23.000000000 -0400
21973+++ linux-3.0.4/drivers/acpi/processor_driver.c 2011-08-23 21:47:55.000000000 -0400
21974@@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(
21975 return 0;
21976 #endif
21977
21978- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
21979+ BUG_ON(pr->id >= nr_cpu_ids);
21980
21981 /*
21982 * Buggy BIOS check
21983diff -urNp linux-3.0.4/drivers/ata/libata-core.c linux-3.0.4/drivers/ata/libata-core.c
21984--- linux-3.0.4/drivers/ata/libata-core.c 2011-07-21 22:17:23.000000000 -0400
21985+++ linux-3.0.4/drivers/ata/libata-core.c 2011-08-23 21:47:55.000000000 -0400
21986@@ -4753,7 +4753,7 @@ void ata_qc_free(struct ata_queued_cmd *
21987 struct ata_port *ap;
21988 unsigned int tag;
21989
21990- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21991+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21992 ap = qc->ap;
21993
21994 qc->flags = 0;
21995@@ -4769,7 +4769,7 @@ void __ata_qc_complete(struct ata_queued
21996 struct ata_port *ap;
21997 struct ata_link *link;
21998
21999- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22000+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22001 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
22002 ap = qc->ap;
22003 link = qc->dev->link;
22004@@ -5774,6 +5774,7 @@ static void ata_finalize_port_ops(struct
22005 return;
22006
22007 spin_lock(&lock);
22008+ pax_open_kernel();
22009
22010 for (cur = ops->inherits; cur; cur = cur->inherits) {
22011 void **inherit = (void **)cur;
22012@@ -5787,8 +5788,9 @@ static void ata_finalize_port_ops(struct
22013 if (IS_ERR(*pp))
22014 *pp = NULL;
22015
22016- ops->inherits = NULL;
22017+ *(struct ata_port_operations **)&ops->inherits = NULL;
22018
22019+ pax_close_kernel();
22020 spin_unlock(&lock);
22021 }
22022
22023diff -urNp linux-3.0.4/drivers/ata/libata-eh.c linux-3.0.4/drivers/ata/libata-eh.c
22024--- linux-3.0.4/drivers/ata/libata-eh.c 2011-07-21 22:17:23.000000000 -0400
22025+++ linux-3.0.4/drivers/ata/libata-eh.c 2011-08-23 21:48:14.000000000 -0400
22026@@ -2518,6 +2518,8 @@ void ata_eh_report(struct ata_port *ap)
22027 {
22028 struct ata_link *link;
22029
22030+ pax_track_stack();
22031+
22032 ata_for_each_link(link, ap, HOST_FIRST)
22033 ata_eh_link_report(link);
22034 }
22035diff -urNp linux-3.0.4/drivers/ata/pata_arasan_cf.c linux-3.0.4/drivers/ata/pata_arasan_cf.c
22036--- linux-3.0.4/drivers/ata/pata_arasan_cf.c 2011-07-21 22:17:23.000000000 -0400
22037+++ linux-3.0.4/drivers/ata/pata_arasan_cf.c 2011-08-23 21:47:55.000000000 -0400
22038@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(str
22039 /* Handle platform specific quirks */
22040 if (pdata->quirk) {
22041 if (pdata->quirk & CF_BROKEN_PIO) {
22042- ap->ops->set_piomode = NULL;
22043+ pax_open_kernel();
22044+ *(void **)&ap->ops->set_piomode = NULL;
22045+ pax_close_kernel();
22046 ap->pio_mask = 0;
22047 }
22048 if (pdata->quirk & CF_BROKEN_MWDMA)
22049diff -urNp linux-3.0.4/drivers/atm/adummy.c linux-3.0.4/drivers/atm/adummy.c
22050--- linux-3.0.4/drivers/atm/adummy.c 2011-07-21 22:17:23.000000000 -0400
22051+++ linux-3.0.4/drivers/atm/adummy.c 2011-08-23 21:47:55.000000000 -0400
22052@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct
22053 vcc->pop(vcc, skb);
22054 else
22055 dev_kfree_skb_any(skb);
22056- atomic_inc(&vcc->stats->tx);
22057+ atomic_inc_unchecked(&vcc->stats->tx);
22058
22059 return 0;
22060 }
22061diff -urNp linux-3.0.4/drivers/atm/ambassador.c linux-3.0.4/drivers/atm/ambassador.c
22062--- linux-3.0.4/drivers/atm/ambassador.c 2011-07-21 22:17:23.000000000 -0400
22063+++ linux-3.0.4/drivers/atm/ambassador.c 2011-08-23 21:47:55.000000000 -0400
22064@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
22065 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
22066
22067 // VC layer stats
22068- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22069+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22070
22071 // free the descriptor
22072 kfree (tx_descr);
22073@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
22074 dump_skb ("<<<", vc, skb);
22075
22076 // VC layer stats
22077- atomic_inc(&atm_vcc->stats->rx);
22078+ atomic_inc_unchecked(&atm_vcc->stats->rx);
22079 __net_timestamp(skb);
22080 // end of our responsibility
22081 atm_vcc->push (atm_vcc, skb);
22082@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
22083 } else {
22084 PRINTK (KERN_INFO, "dropped over-size frame");
22085 // should we count this?
22086- atomic_inc(&atm_vcc->stats->rx_drop);
22087+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22088 }
22089
22090 } else {
22091@@ -1342,7 +1342,7 @@ static int amb_send (struct atm_vcc * at
22092 }
22093
22094 if (check_area (skb->data, skb->len)) {
22095- atomic_inc(&atm_vcc->stats->tx_err);
22096+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
22097 return -ENOMEM; // ?
22098 }
22099
22100diff -urNp linux-3.0.4/drivers/atm/atmtcp.c linux-3.0.4/drivers/atm/atmtcp.c
22101--- linux-3.0.4/drivers/atm/atmtcp.c 2011-07-21 22:17:23.000000000 -0400
22102+++ linux-3.0.4/drivers/atm/atmtcp.c 2011-08-23 21:47:55.000000000 -0400
22103@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc
22104 if (vcc->pop) vcc->pop(vcc,skb);
22105 else dev_kfree_skb(skb);
22106 if (dev_data) return 0;
22107- atomic_inc(&vcc->stats->tx_err);
22108+ atomic_inc_unchecked(&vcc->stats->tx_err);
22109 return -ENOLINK;
22110 }
22111 size = skb->len+sizeof(struct atmtcp_hdr);
22112@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc
22113 if (!new_skb) {
22114 if (vcc->pop) vcc->pop(vcc,skb);
22115 else dev_kfree_skb(skb);
22116- atomic_inc(&vcc->stats->tx_err);
22117+ atomic_inc_unchecked(&vcc->stats->tx_err);
22118 return -ENOBUFS;
22119 }
22120 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
22121@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc
22122 if (vcc->pop) vcc->pop(vcc,skb);
22123 else dev_kfree_skb(skb);
22124 out_vcc->push(out_vcc,new_skb);
22125- atomic_inc(&vcc->stats->tx);
22126- atomic_inc(&out_vcc->stats->rx);
22127+ atomic_inc_unchecked(&vcc->stats->tx);
22128+ atomic_inc_unchecked(&out_vcc->stats->rx);
22129 return 0;
22130 }
22131
22132@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc
22133 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
22134 read_unlock(&vcc_sklist_lock);
22135 if (!out_vcc) {
22136- atomic_inc(&vcc->stats->tx_err);
22137+ atomic_inc_unchecked(&vcc->stats->tx_err);
22138 goto done;
22139 }
22140 skb_pull(skb,sizeof(struct atmtcp_hdr));
22141@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc
22142 __net_timestamp(new_skb);
22143 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
22144 out_vcc->push(out_vcc,new_skb);
22145- atomic_inc(&vcc->stats->tx);
22146- atomic_inc(&out_vcc->stats->rx);
22147+ atomic_inc_unchecked(&vcc->stats->tx);
22148+ atomic_inc_unchecked(&out_vcc->stats->rx);
22149 done:
22150 if (vcc->pop) vcc->pop(vcc,skb);
22151 else dev_kfree_skb(skb);
22152diff -urNp linux-3.0.4/drivers/atm/eni.c linux-3.0.4/drivers/atm/eni.c
22153--- linux-3.0.4/drivers/atm/eni.c 2011-07-21 22:17:23.000000000 -0400
22154+++ linux-3.0.4/drivers/atm/eni.c 2011-08-23 21:47:55.000000000 -0400
22155@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
22156 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
22157 vcc->dev->number);
22158 length = 0;
22159- atomic_inc(&vcc->stats->rx_err);
22160+ atomic_inc_unchecked(&vcc->stats->rx_err);
22161 }
22162 else {
22163 length = ATM_CELL_SIZE-1; /* no HEC */
22164@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22165 size);
22166 }
22167 eff = length = 0;
22168- atomic_inc(&vcc->stats->rx_err);
22169+ atomic_inc_unchecked(&vcc->stats->rx_err);
22170 }
22171 else {
22172 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
22173@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22174 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
22175 vcc->dev->number,vcc->vci,length,size << 2,descr);
22176 length = eff = 0;
22177- atomic_inc(&vcc->stats->rx_err);
22178+ atomic_inc_unchecked(&vcc->stats->rx_err);
22179 }
22180 }
22181 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
22182@@ -771,7 +771,7 @@ rx_dequeued++;
22183 vcc->push(vcc,skb);
22184 pushed++;
22185 }
22186- atomic_inc(&vcc->stats->rx);
22187+ atomic_inc_unchecked(&vcc->stats->rx);
22188 }
22189 wake_up(&eni_dev->rx_wait);
22190 }
22191@@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d
22192 PCI_DMA_TODEVICE);
22193 if (vcc->pop) vcc->pop(vcc,skb);
22194 else dev_kfree_skb_irq(skb);
22195- atomic_inc(&vcc->stats->tx);
22196+ atomic_inc_unchecked(&vcc->stats->tx);
22197 wake_up(&eni_dev->tx_wait);
22198 dma_complete++;
22199 }
22200diff -urNp linux-3.0.4/drivers/atm/firestream.c linux-3.0.4/drivers/atm/firestream.c
22201--- linux-3.0.4/drivers/atm/firestream.c 2011-07-21 22:17:23.000000000 -0400
22202+++ linux-3.0.4/drivers/atm/firestream.c 2011-08-23 21:47:55.000000000 -0400
22203@@ -749,7 +749,7 @@ static void process_txdone_queue (struct
22204 }
22205 }
22206
22207- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22208+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22209
22210 fs_dprintk (FS_DEBUG_TXMEM, "i");
22211 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
22212@@ -816,7 +816,7 @@ static void process_incoming (struct fs_
22213 #endif
22214 skb_put (skb, qe->p1 & 0xffff);
22215 ATM_SKB(skb)->vcc = atm_vcc;
22216- atomic_inc(&atm_vcc->stats->rx);
22217+ atomic_inc_unchecked(&atm_vcc->stats->rx);
22218 __net_timestamp(skb);
22219 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
22220 atm_vcc->push (atm_vcc, skb);
22221@@ -837,12 +837,12 @@ static void process_incoming (struct fs_
22222 kfree (pe);
22223 }
22224 if (atm_vcc)
22225- atomic_inc(&atm_vcc->stats->rx_drop);
22226+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22227 break;
22228 case 0x1f: /* Reassembly abort: no buffers. */
22229 /* Silently increment error counter. */
22230 if (atm_vcc)
22231- atomic_inc(&atm_vcc->stats->rx_drop);
22232+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22233 break;
22234 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
22235 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
22236diff -urNp linux-3.0.4/drivers/atm/fore200e.c linux-3.0.4/drivers/atm/fore200e.c
22237--- linux-3.0.4/drivers/atm/fore200e.c 2011-07-21 22:17:23.000000000 -0400
22238+++ linux-3.0.4/drivers/atm/fore200e.c 2011-08-23 21:47:55.000000000 -0400
22239@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200
22240 #endif
22241 /* check error condition */
22242 if (*entry->status & STATUS_ERROR)
22243- atomic_inc(&vcc->stats->tx_err);
22244+ atomic_inc_unchecked(&vcc->stats->tx_err);
22245 else
22246- atomic_inc(&vcc->stats->tx);
22247+ atomic_inc_unchecked(&vcc->stats->tx);
22248 }
22249 }
22250
22251@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2
22252 if (skb == NULL) {
22253 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
22254
22255- atomic_inc(&vcc->stats->rx_drop);
22256+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22257 return -ENOMEM;
22258 }
22259
22260@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2
22261
22262 dev_kfree_skb_any(skb);
22263
22264- atomic_inc(&vcc->stats->rx_drop);
22265+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22266 return -ENOMEM;
22267 }
22268
22269 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22270
22271 vcc->push(vcc, skb);
22272- atomic_inc(&vcc->stats->rx);
22273+ atomic_inc_unchecked(&vcc->stats->rx);
22274
22275 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22276
22277@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200
22278 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
22279 fore200e->atm_dev->number,
22280 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
22281- atomic_inc(&vcc->stats->rx_err);
22282+ atomic_inc_unchecked(&vcc->stats->rx_err);
22283 }
22284 }
22285
22286@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc
22287 goto retry_here;
22288 }
22289
22290- atomic_inc(&vcc->stats->tx_err);
22291+ atomic_inc_unchecked(&vcc->stats->tx_err);
22292
22293 fore200e->tx_sat++;
22294 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
22295diff -urNp linux-3.0.4/drivers/atm/he.c linux-3.0.4/drivers/atm/he.c
22296--- linux-3.0.4/drivers/atm/he.c 2011-07-21 22:17:23.000000000 -0400
22297+++ linux-3.0.4/drivers/atm/he.c 2011-08-23 21:47:55.000000000 -0400
22298@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22299
22300 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
22301 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
22302- atomic_inc(&vcc->stats->rx_drop);
22303+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22304 goto return_host_buffers;
22305 }
22306
22307@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22308 RBRQ_LEN_ERR(he_dev->rbrq_head)
22309 ? "LEN_ERR" : "",
22310 vcc->vpi, vcc->vci);
22311- atomic_inc(&vcc->stats->rx_err);
22312+ atomic_inc_unchecked(&vcc->stats->rx_err);
22313 goto return_host_buffers;
22314 }
22315
22316@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22317 vcc->push(vcc, skb);
22318 spin_lock(&he_dev->global_lock);
22319
22320- atomic_inc(&vcc->stats->rx);
22321+ atomic_inc_unchecked(&vcc->stats->rx);
22322
22323 return_host_buffers:
22324 ++pdus_assembled;
22325@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
22326 tpd->vcc->pop(tpd->vcc, tpd->skb);
22327 else
22328 dev_kfree_skb_any(tpd->skb);
22329- atomic_inc(&tpd->vcc->stats->tx_err);
22330+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
22331 }
22332 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
22333 return;
22334@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22335 vcc->pop(vcc, skb);
22336 else
22337 dev_kfree_skb_any(skb);
22338- atomic_inc(&vcc->stats->tx_err);
22339+ atomic_inc_unchecked(&vcc->stats->tx_err);
22340 return -EINVAL;
22341 }
22342
22343@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22344 vcc->pop(vcc, skb);
22345 else
22346 dev_kfree_skb_any(skb);
22347- atomic_inc(&vcc->stats->tx_err);
22348+ atomic_inc_unchecked(&vcc->stats->tx_err);
22349 return -EINVAL;
22350 }
22351 #endif
22352@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22353 vcc->pop(vcc, skb);
22354 else
22355 dev_kfree_skb_any(skb);
22356- atomic_inc(&vcc->stats->tx_err);
22357+ atomic_inc_unchecked(&vcc->stats->tx_err);
22358 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22359 return -ENOMEM;
22360 }
22361@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22362 vcc->pop(vcc, skb);
22363 else
22364 dev_kfree_skb_any(skb);
22365- atomic_inc(&vcc->stats->tx_err);
22366+ atomic_inc_unchecked(&vcc->stats->tx_err);
22367 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22368 return -ENOMEM;
22369 }
22370@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22371 __enqueue_tpd(he_dev, tpd, cid);
22372 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22373
22374- atomic_inc(&vcc->stats->tx);
22375+ atomic_inc_unchecked(&vcc->stats->tx);
22376
22377 return 0;
22378 }
22379diff -urNp linux-3.0.4/drivers/atm/horizon.c linux-3.0.4/drivers/atm/horizon.c
22380--- linux-3.0.4/drivers/atm/horizon.c 2011-07-21 22:17:23.000000000 -0400
22381+++ linux-3.0.4/drivers/atm/horizon.c 2011-08-23 21:47:55.000000000 -0400
22382@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev,
22383 {
22384 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
22385 // VC layer stats
22386- atomic_inc(&vcc->stats->rx);
22387+ atomic_inc_unchecked(&vcc->stats->rx);
22388 __net_timestamp(skb);
22389 // end of our responsibility
22390 vcc->push (vcc, skb);
22391@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const
22392 dev->tx_iovec = NULL;
22393
22394 // VC layer stats
22395- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22396+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22397
22398 // free the skb
22399 hrz_kfree_skb (skb);
22400diff -urNp linux-3.0.4/drivers/atm/idt77252.c linux-3.0.4/drivers/atm/idt77252.c
22401--- linux-3.0.4/drivers/atm/idt77252.c 2011-07-21 22:17:23.000000000 -0400
22402+++ linux-3.0.4/drivers/atm/idt77252.c 2011-08-23 21:47:55.000000000 -0400
22403@@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, str
22404 else
22405 dev_kfree_skb(skb);
22406
22407- atomic_inc(&vcc->stats->tx);
22408+ atomic_inc_unchecked(&vcc->stats->tx);
22409 }
22410
22411 atomic_dec(&scq->used);
22412@@ -1074,13 +1074,13 @@ dequeue_rx(struct idt77252_dev *card, st
22413 if ((sb = dev_alloc_skb(64)) == NULL) {
22414 printk("%s: Can't allocate buffers for aal0.\n",
22415 card->name);
22416- atomic_add(i, &vcc->stats->rx_drop);
22417+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
22418 break;
22419 }
22420 if (!atm_charge(vcc, sb->truesize)) {
22421 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
22422 card->name);
22423- atomic_add(i - 1, &vcc->stats->rx_drop);
22424+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
22425 dev_kfree_skb(sb);
22426 break;
22427 }
22428@@ -1097,7 +1097,7 @@ dequeue_rx(struct idt77252_dev *card, st
22429 ATM_SKB(sb)->vcc = vcc;
22430 __net_timestamp(sb);
22431 vcc->push(vcc, sb);
22432- atomic_inc(&vcc->stats->rx);
22433+ atomic_inc_unchecked(&vcc->stats->rx);
22434
22435 cell += ATM_CELL_PAYLOAD;
22436 }
22437@@ -1134,13 +1134,13 @@ dequeue_rx(struct idt77252_dev *card, st
22438 "(CDC: %08x)\n",
22439 card->name, len, rpp->len, readl(SAR_REG_CDC));
22440 recycle_rx_pool_skb(card, rpp);
22441- atomic_inc(&vcc->stats->rx_err);
22442+ atomic_inc_unchecked(&vcc->stats->rx_err);
22443 return;
22444 }
22445 if (stat & SAR_RSQE_CRC) {
22446 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
22447 recycle_rx_pool_skb(card, rpp);
22448- atomic_inc(&vcc->stats->rx_err);
22449+ atomic_inc_unchecked(&vcc->stats->rx_err);
22450 return;
22451 }
22452 if (skb_queue_len(&rpp->queue) > 1) {
22453@@ -1151,7 +1151,7 @@ dequeue_rx(struct idt77252_dev *card, st
22454 RXPRINTK("%s: Can't alloc RX skb.\n",
22455 card->name);
22456 recycle_rx_pool_skb(card, rpp);
22457- atomic_inc(&vcc->stats->rx_err);
22458+ atomic_inc_unchecked(&vcc->stats->rx_err);
22459 return;
22460 }
22461 if (!atm_charge(vcc, skb->truesize)) {
22462@@ -1170,7 +1170,7 @@ dequeue_rx(struct idt77252_dev *card, st
22463 __net_timestamp(skb);
22464
22465 vcc->push(vcc, skb);
22466- atomic_inc(&vcc->stats->rx);
22467+ atomic_inc_unchecked(&vcc->stats->rx);
22468
22469 return;
22470 }
22471@@ -1192,7 +1192,7 @@ dequeue_rx(struct idt77252_dev *card, st
22472 __net_timestamp(skb);
22473
22474 vcc->push(vcc, skb);
22475- atomic_inc(&vcc->stats->rx);
22476+ atomic_inc_unchecked(&vcc->stats->rx);
22477
22478 if (skb->truesize > SAR_FB_SIZE_3)
22479 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
22480@@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
22481 if (vcc->qos.aal != ATM_AAL0) {
22482 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
22483 card->name, vpi, vci);
22484- atomic_inc(&vcc->stats->rx_drop);
22485+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22486 goto drop;
22487 }
22488
22489 if ((sb = dev_alloc_skb(64)) == NULL) {
22490 printk("%s: Can't allocate buffers for AAL0.\n",
22491 card->name);
22492- atomic_inc(&vcc->stats->rx_err);
22493+ atomic_inc_unchecked(&vcc->stats->rx_err);
22494 goto drop;
22495 }
22496
22497@@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
22498 ATM_SKB(sb)->vcc = vcc;
22499 __net_timestamp(sb);
22500 vcc->push(vcc, sb);
22501- atomic_inc(&vcc->stats->rx);
22502+ atomic_inc_unchecked(&vcc->stats->rx);
22503
22504 drop:
22505 skb_pull(queue, 64);
22506@@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22507
22508 if (vc == NULL) {
22509 printk("%s: NULL connection in send().\n", card->name);
22510- atomic_inc(&vcc->stats->tx_err);
22511+ atomic_inc_unchecked(&vcc->stats->tx_err);
22512 dev_kfree_skb(skb);
22513 return -EINVAL;
22514 }
22515 if (!test_bit(VCF_TX, &vc->flags)) {
22516 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
22517- atomic_inc(&vcc->stats->tx_err);
22518+ atomic_inc_unchecked(&vcc->stats->tx_err);
22519 dev_kfree_skb(skb);
22520 return -EINVAL;
22521 }
22522@@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22523 break;
22524 default:
22525 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
22526- atomic_inc(&vcc->stats->tx_err);
22527+ atomic_inc_unchecked(&vcc->stats->tx_err);
22528 dev_kfree_skb(skb);
22529 return -EINVAL;
22530 }
22531
22532 if (skb_shinfo(skb)->nr_frags != 0) {
22533 printk("%s: No scatter-gather yet.\n", card->name);
22534- atomic_inc(&vcc->stats->tx_err);
22535+ atomic_inc_unchecked(&vcc->stats->tx_err);
22536 dev_kfree_skb(skb);
22537 return -EINVAL;
22538 }
22539@@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22540
22541 err = queue_skb(card, vc, skb, oam);
22542 if (err) {
22543- atomic_inc(&vcc->stats->tx_err);
22544+ atomic_inc_unchecked(&vcc->stats->tx_err);
22545 dev_kfree_skb(skb);
22546 return err;
22547 }
22548@@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
22549 skb = dev_alloc_skb(64);
22550 if (!skb) {
22551 printk("%s: Out of memory in send_oam().\n", card->name);
22552- atomic_inc(&vcc->stats->tx_err);
22553+ atomic_inc_unchecked(&vcc->stats->tx_err);
22554 return -ENOMEM;
22555 }
22556 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
22557diff -urNp linux-3.0.4/drivers/atm/iphase.c linux-3.0.4/drivers/atm/iphase.c
22558--- linux-3.0.4/drivers/atm/iphase.c 2011-07-21 22:17:23.000000000 -0400
22559+++ linux-3.0.4/drivers/atm/iphase.c 2011-08-23 21:47:55.000000000 -0400
22560@@ -1120,7 +1120,7 @@ static int rx_pkt(struct atm_dev *dev)
22561 status = (u_short) (buf_desc_ptr->desc_mode);
22562 if (status & (RX_CER | RX_PTE | RX_OFL))
22563 {
22564- atomic_inc(&vcc->stats->rx_err);
22565+ atomic_inc_unchecked(&vcc->stats->rx_err);
22566 IF_ERR(printk("IA: bad packet, dropping it");)
22567 if (status & RX_CER) {
22568 IF_ERR(printk(" cause: packet CRC error\n");)
22569@@ -1143,7 +1143,7 @@ static int rx_pkt(struct atm_dev *dev)
22570 len = dma_addr - buf_addr;
22571 if (len > iadev->rx_buf_sz) {
22572 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
22573- atomic_inc(&vcc->stats->rx_err);
22574+ atomic_inc_unchecked(&vcc->stats->rx_err);
22575 goto out_free_desc;
22576 }
22577
22578@@ -1293,7 +1293,7 @@ static void rx_dle_intr(struct atm_dev *
22579 ia_vcc = INPH_IA_VCC(vcc);
22580 if (ia_vcc == NULL)
22581 {
22582- atomic_inc(&vcc->stats->rx_err);
22583+ atomic_inc_unchecked(&vcc->stats->rx_err);
22584 dev_kfree_skb_any(skb);
22585 atm_return(vcc, atm_guess_pdu2truesize(len));
22586 goto INCR_DLE;
22587@@ -1305,7 +1305,7 @@ static void rx_dle_intr(struct atm_dev *
22588 if ((length > iadev->rx_buf_sz) || (length >
22589 (skb->len - sizeof(struct cpcs_trailer))))
22590 {
22591- atomic_inc(&vcc->stats->rx_err);
22592+ atomic_inc_unchecked(&vcc->stats->rx_err);
22593 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
22594 length, skb->len);)
22595 dev_kfree_skb_any(skb);
22596@@ -1321,7 +1321,7 @@ static void rx_dle_intr(struct atm_dev *
22597
22598 IF_RX(printk("rx_dle_intr: skb push");)
22599 vcc->push(vcc,skb);
22600- atomic_inc(&vcc->stats->rx);
22601+ atomic_inc_unchecked(&vcc->stats->rx);
22602 iadev->rx_pkt_cnt++;
22603 }
22604 INCR_DLE:
22605@@ -2801,15 +2801,15 @@ static int ia_ioctl(struct atm_dev *dev,
22606 {
22607 struct k_sonet_stats *stats;
22608 stats = &PRIV(_ia_dev[board])->sonet_stats;
22609- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
22610- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
22611- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
22612- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
22613- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
22614- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
22615- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
22616- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
22617- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
22618+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
22619+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
22620+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
22621+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
22622+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
22623+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
22624+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
22625+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
22626+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
22627 }
22628 ia_cmds.status = 0;
22629 break;
22630@@ -2914,7 +2914,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
22631 if ((desc == 0) || (desc > iadev->num_tx_desc))
22632 {
22633 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
22634- atomic_inc(&vcc->stats->tx);
22635+ atomic_inc_unchecked(&vcc->stats->tx);
22636 if (vcc->pop)
22637 vcc->pop(vcc, skb);
22638 else
22639@@ -3019,14 +3019,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
22640 ATM_DESC(skb) = vcc->vci;
22641 skb_queue_tail(&iadev->tx_dma_q, skb);
22642
22643- atomic_inc(&vcc->stats->tx);
22644+ atomic_inc_unchecked(&vcc->stats->tx);
22645 iadev->tx_pkt_cnt++;
22646 /* Increment transaction counter */
22647 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
22648
22649 #if 0
22650 /* add flow control logic */
22651- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
22652+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
22653 if (iavcc->vc_desc_cnt > 10) {
22654 vcc->tx_quota = vcc->tx_quota * 3 / 4;
22655 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
22656diff -urNp linux-3.0.4/drivers/atm/lanai.c linux-3.0.4/drivers/atm/lanai.c
22657--- linux-3.0.4/drivers/atm/lanai.c 2011-07-21 22:17:23.000000000 -0400
22658+++ linux-3.0.4/drivers/atm/lanai.c 2011-08-23 21:47:55.000000000 -0400
22659@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
22660 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
22661 lanai_endtx(lanai, lvcc);
22662 lanai_free_skb(lvcc->tx.atmvcc, skb);
22663- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
22664+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
22665 }
22666
22667 /* Try to fill the buffer - don't call unless there is backlog */
22668@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
22669 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
22670 __net_timestamp(skb);
22671 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
22672- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
22673+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
22674 out:
22675 lvcc->rx.buf.ptr = end;
22676 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
22677@@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d
22678 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
22679 "vcc %d\n", lanai->number, (unsigned int) s, vci);
22680 lanai->stats.service_rxnotaal5++;
22681- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22682+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22683 return 0;
22684 }
22685 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
22686@@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d
22687 int bytes;
22688 read_unlock(&vcc_sklist_lock);
22689 DPRINTK("got trashed rx pdu on vci %d\n", vci);
22690- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22691+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22692 lvcc->stats.x.aal5.service_trash++;
22693 bytes = (SERVICE_GET_END(s) * 16) -
22694 (((unsigned long) lvcc->rx.buf.ptr) -
22695@@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d
22696 }
22697 if (s & SERVICE_STREAM) {
22698 read_unlock(&vcc_sklist_lock);
22699- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22700+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22701 lvcc->stats.x.aal5.service_stream++;
22702 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
22703 "PDU on VCI %d!\n", lanai->number, vci);
22704@@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d
22705 return 0;
22706 }
22707 DPRINTK("got rx crc error on vci %d\n", vci);
22708- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22709+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22710 lvcc->stats.x.aal5.service_rxcrc++;
22711 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
22712 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
22713diff -urNp linux-3.0.4/drivers/atm/nicstar.c linux-3.0.4/drivers/atm/nicstar.c
22714--- linux-3.0.4/drivers/atm/nicstar.c 2011-07-21 22:17:23.000000000 -0400
22715+++ linux-3.0.4/drivers/atm/nicstar.c 2011-08-23 21:47:55.000000000 -0400
22716@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc,
22717 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
22718 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
22719 card->index);
22720- atomic_inc(&vcc->stats->tx_err);
22721+ atomic_inc_unchecked(&vcc->stats->tx_err);
22722 dev_kfree_skb_any(skb);
22723 return -EINVAL;
22724 }
22725@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc,
22726 if (!vc->tx) {
22727 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
22728 card->index);
22729- atomic_inc(&vcc->stats->tx_err);
22730+ atomic_inc_unchecked(&vcc->stats->tx_err);
22731 dev_kfree_skb_any(skb);
22732 return -EINVAL;
22733 }
22734@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc,
22735 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
22736 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
22737 card->index);
22738- atomic_inc(&vcc->stats->tx_err);
22739+ atomic_inc_unchecked(&vcc->stats->tx_err);
22740 dev_kfree_skb_any(skb);
22741 return -EINVAL;
22742 }
22743
22744 if (skb_shinfo(skb)->nr_frags != 0) {
22745 printk("nicstar%d: No scatter-gather yet.\n", card->index);
22746- atomic_inc(&vcc->stats->tx_err);
22747+ atomic_inc_unchecked(&vcc->stats->tx_err);
22748 dev_kfree_skb_any(skb);
22749 return -EINVAL;
22750 }
22751@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc,
22752 }
22753
22754 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
22755- atomic_inc(&vcc->stats->tx_err);
22756+ atomic_inc_unchecked(&vcc->stats->tx_err);
22757 dev_kfree_skb_any(skb);
22758 return -EIO;
22759 }
22760- atomic_inc(&vcc->stats->tx);
22761+ atomic_inc_unchecked(&vcc->stats->tx);
22762
22763 return 0;
22764 }
22765@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns
22766 printk
22767 ("nicstar%d: Can't allocate buffers for aal0.\n",
22768 card->index);
22769- atomic_add(i, &vcc->stats->rx_drop);
22770+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
22771 break;
22772 }
22773 if (!atm_charge(vcc, sb->truesize)) {
22774 RXPRINTK
22775 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
22776 card->index);
22777- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
22778+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
22779 dev_kfree_skb_any(sb);
22780 break;
22781 }
22782@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns
22783 ATM_SKB(sb)->vcc = vcc;
22784 __net_timestamp(sb);
22785 vcc->push(vcc, sb);
22786- atomic_inc(&vcc->stats->rx);
22787+ atomic_inc_unchecked(&vcc->stats->rx);
22788 cell += ATM_CELL_PAYLOAD;
22789 }
22790
22791@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns
22792 if (iovb == NULL) {
22793 printk("nicstar%d: Out of iovec buffers.\n",
22794 card->index);
22795- atomic_inc(&vcc->stats->rx_drop);
22796+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22797 recycle_rx_buf(card, skb);
22798 return;
22799 }
22800@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns
22801 small or large buffer itself. */
22802 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
22803 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
22804- atomic_inc(&vcc->stats->rx_err);
22805+ atomic_inc_unchecked(&vcc->stats->rx_err);
22806 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22807 NS_MAX_IOVECS);
22808 NS_PRV_IOVCNT(iovb) = 0;
22809@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns
22810 ("nicstar%d: Expected a small buffer, and this is not one.\n",
22811 card->index);
22812 which_list(card, skb);
22813- atomic_inc(&vcc->stats->rx_err);
22814+ atomic_inc_unchecked(&vcc->stats->rx_err);
22815 recycle_rx_buf(card, skb);
22816 vc->rx_iov = NULL;
22817 recycle_iov_buf(card, iovb);
22818@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns
22819 ("nicstar%d: Expected a large buffer, and this is not one.\n",
22820 card->index);
22821 which_list(card, skb);
22822- atomic_inc(&vcc->stats->rx_err);
22823+ atomic_inc_unchecked(&vcc->stats->rx_err);
22824 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22825 NS_PRV_IOVCNT(iovb));
22826 vc->rx_iov = NULL;
22827@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns
22828 printk(" - PDU size mismatch.\n");
22829 else
22830 printk(".\n");
22831- atomic_inc(&vcc->stats->rx_err);
22832+ atomic_inc_unchecked(&vcc->stats->rx_err);
22833 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22834 NS_PRV_IOVCNT(iovb));
22835 vc->rx_iov = NULL;
22836@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns
22837 /* skb points to a small buffer */
22838 if (!atm_charge(vcc, skb->truesize)) {
22839 push_rxbufs(card, skb);
22840- atomic_inc(&vcc->stats->rx_drop);
22841+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22842 } else {
22843 skb_put(skb, len);
22844 dequeue_sm_buf(card, skb);
22845@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns
22846 ATM_SKB(skb)->vcc = vcc;
22847 __net_timestamp(skb);
22848 vcc->push(vcc, skb);
22849- atomic_inc(&vcc->stats->rx);
22850+ atomic_inc_unchecked(&vcc->stats->rx);
22851 }
22852 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
22853 struct sk_buff *sb;
22854@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns
22855 if (len <= NS_SMBUFSIZE) {
22856 if (!atm_charge(vcc, sb->truesize)) {
22857 push_rxbufs(card, sb);
22858- atomic_inc(&vcc->stats->rx_drop);
22859+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22860 } else {
22861 skb_put(sb, len);
22862 dequeue_sm_buf(card, sb);
22863@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns
22864 ATM_SKB(sb)->vcc = vcc;
22865 __net_timestamp(sb);
22866 vcc->push(vcc, sb);
22867- atomic_inc(&vcc->stats->rx);
22868+ atomic_inc_unchecked(&vcc->stats->rx);
22869 }
22870
22871 push_rxbufs(card, skb);
22872@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns
22873
22874 if (!atm_charge(vcc, skb->truesize)) {
22875 push_rxbufs(card, skb);
22876- atomic_inc(&vcc->stats->rx_drop);
22877+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22878 } else {
22879 dequeue_lg_buf(card, skb);
22880 #ifdef NS_USE_DESTRUCTORS
22881@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns
22882 ATM_SKB(skb)->vcc = vcc;
22883 __net_timestamp(skb);
22884 vcc->push(vcc, skb);
22885- atomic_inc(&vcc->stats->rx);
22886+ atomic_inc_unchecked(&vcc->stats->rx);
22887 }
22888
22889 push_rxbufs(card, sb);
22890@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns
22891 printk
22892 ("nicstar%d: Out of huge buffers.\n",
22893 card->index);
22894- atomic_inc(&vcc->stats->rx_drop);
22895+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22896 recycle_iovec_rx_bufs(card,
22897 (struct iovec *)
22898 iovb->data,
22899@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns
22900 card->hbpool.count++;
22901 } else
22902 dev_kfree_skb_any(hb);
22903- atomic_inc(&vcc->stats->rx_drop);
22904+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22905 } else {
22906 /* Copy the small buffer to the huge buffer */
22907 sb = (struct sk_buff *)iov->iov_base;
22908@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns
22909 #endif /* NS_USE_DESTRUCTORS */
22910 __net_timestamp(hb);
22911 vcc->push(vcc, hb);
22912- atomic_inc(&vcc->stats->rx);
22913+ atomic_inc_unchecked(&vcc->stats->rx);
22914 }
22915 }
22916
22917diff -urNp linux-3.0.4/drivers/atm/solos-pci.c linux-3.0.4/drivers/atm/solos-pci.c
22918--- linux-3.0.4/drivers/atm/solos-pci.c 2011-07-21 22:17:23.000000000 -0400
22919+++ linux-3.0.4/drivers/atm/solos-pci.c 2011-08-23 21:48:14.000000000 -0400
22920@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
22921 }
22922 atm_charge(vcc, skb->truesize);
22923 vcc->push(vcc, skb);
22924- atomic_inc(&vcc->stats->rx);
22925+ atomic_inc_unchecked(&vcc->stats->rx);
22926 break;
22927
22928 case PKT_STATUS:
22929@@ -899,6 +899,8 @@ static int print_buffer(struct sk_buff *
22930 char msg[500];
22931 char item[10];
22932
22933+ pax_track_stack();
22934+
22935 len = buf->len;
22936 for (i = 0; i < len; i++){
22937 if(i % 8 == 0)
22938@@ -1008,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_car
22939 vcc = SKB_CB(oldskb)->vcc;
22940
22941 if (vcc) {
22942- atomic_inc(&vcc->stats->tx);
22943+ atomic_inc_unchecked(&vcc->stats->tx);
22944 solos_pop(vcc, oldskb);
22945 } else
22946 dev_kfree_skb_irq(oldskb);
22947diff -urNp linux-3.0.4/drivers/atm/suni.c linux-3.0.4/drivers/atm/suni.c
22948--- linux-3.0.4/drivers/atm/suni.c 2011-07-21 22:17:23.000000000 -0400
22949+++ linux-3.0.4/drivers/atm/suni.c 2011-08-23 21:47:55.000000000 -0400
22950@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
22951
22952
22953 #define ADD_LIMITED(s,v) \
22954- atomic_add((v),&stats->s); \
22955- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
22956+ atomic_add_unchecked((v),&stats->s); \
22957+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
22958
22959
22960 static void suni_hz(unsigned long from_timer)
22961diff -urNp linux-3.0.4/drivers/atm/uPD98402.c linux-3.0.4/drivers/atm/uPD98402.c
22962--- linux-3.0.4/drivers/atm/uPD98402.c 2011-07-21 22:17:23.000000000 -0400
22963+++ linux-3.0.4/drivers/atm/uPD98402.c 2011-08-23 21:47:55.000000000 -0400
22964@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
22965 struct sonet_stats tmp;
22966 int error = 0;
22967
22968- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
22969+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
22970 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
22971 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
22972 if (zero && !error) {
22973@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
22974
22975
22976 #define ADD_LIMITED(s,v) \
22977- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
22978- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
22979- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
22980+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
22981+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
22982+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
22983
22984
22985 static void stat_event(struct atm_dev *dev)
22986@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
22987 if (reason & uPD98402_INT_PFM) stat_event(dev);
22988 if (reason & uPD98402_INT_PCO) {
22989 (void) GET(PCOCR); /* clear interrupt cause */
22990- atomic_add(GET(HECCT),
22991+ atomic_add_unchecked(GET(HECCT),
22992 &PRIV(dev)->sonet_stats.uncorr_hcs);
22993 }
22994 if ((reason & uPD98402_INT_RFO) &&
22995@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
22996 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
22997 uPD98402_INT_LOS),PIMR); /* enable them */
22998 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
22999- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
23000- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
23001- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
23002+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
23003+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
23004+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
23005 return 0;
23006 }
23007
23008diff -urNp linux-3.0.4/drivers/atm/zatm.c linux-3.0.4/drivers/atm/zatm.c
23009--- linux-3.0.4/drivers/atm/zatm.c 2011-07-21 22:17:23.000000000 -0400
23010+++ linux-3.0.4/drivers/atm/zatm.c 2011-08-23 21:47:55.000000000 -0400
23011@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23012 }
23013 if (!size) {
23014 dev_kfree_skb_irq(skb);
23015- if (vcc) atomic_inc(&vcc->stats->rx_err);
23016+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
23017 continue;
23018 }
23019 if (!atm_charge(vcc,skb->truesize)) {
23020@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23021 skb->len = size;
23022 ATM_SKB(skb)->vcc = vcc;
23023 vcc->push(vcc,skb);
23024- atomic_inc(&vcc->stats->rx);
23025+ atomic_inc_unchecked(&vcc->stats->rx);
23026 }
23027 zout(pos & 0xffff,MTA(mbx));
23028 #if 0 /* probably a stupid idea */
23029@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
23030 skb_queue_head(&zatm_vcc->backlog,skb);
23031 break;
23032 }
23033- atomic_inc(&vcc->stats->tx);
23034+ atomic_inc_unchecked(&vcc->stats->tx);
23035 wake_up(&zatm_vcc->tx_wait);
23036 }
23037
23038diff -urNp linux-3.0.4/drivers/base/power/wakeup.c linux-3.0.4/drivers/base/power/wakeup.c
23039--- linux-3.0.4/drivers/base/power/wakeup.c 2011-07-21 22:17:23.000000000 -0400
23040+++ linux-3.0.4/drivers/base/power/wakeup.c 2011-08-23 21:47:55.000000000 -0400
23041@@ -29,14 +29,14 @@ bool events_check_enabled;
23042 * They need to be modified together atomically, so it's better to use one
23043 * atomic variable to hold them both.
23044 */
23045-static atomic_t combined_event_count = ATOMIC_INIT(0);
23046+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
23047
23048 #define IN_PROGRESS_BITS (sizeof(int) * 4)
23049 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
23050
23051 static void split_counters(unsigned int *cnt, unsigned int *inpr)
23052 {
23053- unsigned int comb = atomic_read(&combined_event_count);
23054+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
23055
23056 *cnt = (comb >> IN_PROGRESS_BITS);
23057 *inpr = comb & MAX_IN_PROGRESS;
23058@@ -350,7 +350,7 @@ static void wakeup_source_activate(struc
23059 ws->last_time = ktime_get();
23060
23061 /* Increment the counter of events in progress. */
23062- atomic_inc(&combined_event_count);
23063+ atomic_inc_unchecked(&combined_event_count);
23064 }
23065
23066 /**
23067@@ -440,7 +440,7 @@ static void wakeup_source_deactivate(str
23068 * Increment the counter of registered wakeup events and decrement the
23069 * couter of wakeup events in progress simultaneously.
23070 */
23071- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
23072+ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
23073 }
23074
23075 /**
23076diff -urNp linux-3.0.4/drivers/block/cciss.c linux-3.0.4/drivers/block/cciss.c
23077--- linux-3.0.4/drivers/block/cciss.c 2011-07-21 22:17:23.000000000 -0400
23078+++ linux-3.0.4/drivers/block/cciss.c 2011-08-23 21:48:14.000000000 -0400
23079@@ -1179,6 +1179,8 @@ static int cciss_ioctl32_passthru(struct
23080 int err;
23081 u32 cp;
23082
23083+ memset(&arg64, 0, sizeof(arg64));
23084+
23085 err = 0;
23086 err |=
23087 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
23088@@ -2986,7 +2988,7 @@ static void start_io(ctlr_info_t *h)
23089 while (!list_empty(&h->reqQ)) {
23090 c = list_entry(h->reqQ.next, CommandList_struct, list);
23091 /* can't do anything if fifo is full */
23092- if ((h->access.fifo_full(h))) {
23093+ if ((h->access->fifo_full(h))) {
23094 dev_warn(&h->pdev->dev, "fifo full\n");
23095 break;
23096 }
23097@@ -2996,7 +2998,7 @@ static void start_io(ctlr_info_t *h)
23098 h->Qdepth--;
23099
23100 /* Tell the controller execute command */
23101- h->access.submit_command(h, c);
23102+ h->access->submit_command(h, c);
23103
23104 /* Put job onto the completed Q */
23105 addQ(&h->cmpQ, c);
23106@@ -3422,17 +3424,17 @@ startio:
23107
23108 static inline unsigned long get_next_completion(ctlr_info_t *h)
23109 {
23110- return h->access.command_completed(h);
23111+ return h->access->command_completed(h);
23112 }
23113
23114 static inline int interrupt_pending(ctlr_info_t *h)
23115 {
23116- return h->access.intr_pending(h);
23117+ return h->access->intr_pending(h);
23118 }
23119
23120 static inline long interrupt_not_for_us(ctlr_info_t *h)
23121 {
23122- return ((h->access.intr_pending(h) == 0) ||
23123+ return ((h->access->intr_pending(h) == 0) ||
23124 (h->interrupts_enabled == 0));
23125 }
23126
23127@@ -3465,7 +3467,7 @@ static inline u32 next_command(ctlr_info
23128 u32 a;
23129
23130 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
23131- return h->access.command_completed(h);
23132+ return h->access->command_completed(h);
23133
23134 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
23135 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
23136@@ -4020,7 +4022,7 @@ static void __devinit cciss_put_controll
23137 trans_support & CFGTBL_Trans_use_short_tags);
23138
23139 /* Change the access methods to the performant access methods */
23140- h->access = SA5_performant_access;
23141+ h->access = &SA5_performant_access;
23142 h->transMethod = CFGTBL_Trans_Performant;
23143
23144 return;
23145@@ -4292,7 +4294,7 @@ static int __devinit cciss_pci_init(ctlr
23146 if (prod_index < 0)
23147 return -ENODEV;
23148 h->product_name = products[prod_index].product_name;
23149- h->access = *(products[prod_index].access);
23150+ h->access = products[prod_index].access;
23151
23152 if (cciss_board_disabled(h)) {
23153 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
23154@@ -5002,7 +5004,7 @@ reinit_after_soft_reset:
23155 }
23156
23157 /* make sure the board interrupts are off */
23158- h->access.set_intr_mask(h, CCISS_INTR_OFF);
23159+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
23160 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
23161 if (rc)
23162 goto clean2;
23163@@ -5054,7 +5056,7 @@ reinit_after_soft_reset:
23164 * fake ones to scoop up any residual completions.
23165 */
23166 spin_lock_irqsave(&h->lock, flags);
23167- h->access.set_intr_mask(h, CCISS_INTR_OFF);
23168+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
23169 spin_unlock_irqrestore(&h->lock, flags);
23170 free_irq(h->intr[PERF_MODE_INT], h);
23171 rc = cciss_request_irq(h, cciss_msix_discard_completions,
23172@@ -5074,9 +5076,9 @@ reinit_after_soft_reset:
23173 dev_info(&h->pdev->dev, "Board READY.\n");
23174 dev_info(&h->pdev->dev,
23175 "Waiting for stale completions to drain.\n");
23176- h->access.set_intr_mask(h, CCISS_INTR_ON);
23177+ h->access->set_intr_mask(h, CCISS_INTR_ON);
23178 msleep(10000);
23179- h->access.set_intr_mask(h, CCISS_INTR_OFF);
23180+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
23181
23182 rc = controller_reset_failed(h->cfgtable);
23183 if (rc)
23184@@ -5099,7 +5101,7 @@ reinit_after_soft_reset:
23185 cciss_scsi_setup(h);
23186
23187 /* Turn the interrupts on so we can service requests */
23188- h->access.set_intr_mask(h, CCISS_INTR_ON);
23189+ h->access->set_intr_mask(h, CCISS_INTR_ON);
23190
23191 /* Get the firmware version */
23192 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
23193@@ -5171,7 +5173,7 @@ static void cciss_shutdown(struct pci_de
23194 kfree(flush_buf);
23195 if (return_code != IO_OK)
23196 dev_warn(&h->pdev->dev, "Error flushing cache\n");
23197- h->access.set_intr_mask(h, CCISS_INTR_OFF);
23198+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
23199 free_irq(h->intr[PERF_MODE_INT], h);
23200 }
23201
23202diff -urNp linux-3.0.4/drivers/block/cciss.h linux-3.0.4/drivers/block/cciss.h
23203--- linux-3.0.4/drivers/block/cciss.h 2011-09-02 18:11:21.000000000 -0400
23204+++ linux-3.0.4/drivers/block/cciss.h 2011-08-23 21:47:55.000000000 -0400
23205@@ -100,7 +100,7 @@ struct ctlr_info
23206 /* information about each logical volume */
23207 drive_info_struct *drv[CISS_MAX_LUN];
23208
23209- struct access_method access;
23210+ struct access_method *access;
23211
23212 /* queue and queue Info */
23213 struct list_head reqQ;
23214diff -urNp linux-3.0.4/drivers/block/cpqarray.c linux-3.0.4/drivers/block/cpqarray.c
23215--- linux-3.0.4/drivers/block/cpqarray.c 2011-07-21 22:17:23.000000000 -0400
23216+++ linux-3.0.4/drivers/block/cpqarray.c 2011-08-23 21:48:14.000000000 -0400
23217@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_c
23218 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
23219 goto Enomem4;
23220 }
23221- hba[i]->access.set_intr_mask(hba[i], 0);
23222+ hba[i]->access->set_intr_mask(hba[i], 0);
23223 if (request_irq(hba[i]->intr, do_ida_intr,
23224 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
23225 {
23226@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_c
23227 add_timer(&hba[i]->timer);
23228
23229 /* Enable IRQ now that spinlock and rate limit timer are set up */
23230- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23231+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23232
23233 for(j=0; j<NWD; j++) {
23234 struct gendisk *disk = ida_gendisk[i][j];
23235@@ -694,7 +694,7 @@ DBGINFO(
23236 for(i=0; i<NR_PRODUCTS; i++) {
23237 if (board_id == products[i].board_id) {
23238 c->product_name = products[i].product_name;
23239- c->access = *(products[i].access);
23240+ c->access = products[i].access;
23241 break;
23242 }
23243 }
23244@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detec
23245 hba[ctlr]->intr = intr;
23246 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
23247 hba[ctlr]->product_name = products[j].product_name;
23248- hba[ctlr]->access = *(products[j].access);
23249+ hba[ctlr]->access = products[j].access;
23250 hba[ctlr]->ctlr = ctlr;
23251 hba[ctlr]->board_id = board_id;
23252 hba[ctlr]->pci_dev = NULL; /* not PCI */
23253@@ -911,6 +911,8 @@ static void do_ida_request(struct reques
23254 struct scatterlist tmp_sg[SG_MAX];
23255 int i, dir, seg;
23256
23257+ pax_track_stack();
23258+
23259 queue_next:
23260 creq = blk_peek_request(q);
23261 if (!creq)
23262@@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
23263
23264 while((c = h->reqQ) != NULL) {
23265 /* Can't do anything if we're busy */
23266- if (h->access.fifo_full(h) == 0)
23267+ if (h->access->fifo_full(h) == 0)
23268 return;
23269
23270 /* Get the first entry from the request Q */
23271@@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
23272 h->Qdepth--;
23273
23274 /* Tell the controller to do our bidding */
23275- h->access.submit_command(h, c);
23276+ h->access->submit_command(h, c);
23277
23278 /* Get onto the completion Q */
23279 addQ(&h->cmpQ, c);
23280@@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq,
23281 unsigned long flags;
23282 __u32 a,a1;
23283
23284- istat = h->access.intr_pending(h);
23285+ istat = h->access->intr_pending(h);
23286 /* Is this interrupt for us? */
23287 if (istat == 0)
23288 return IRQ_NONE;
23289@@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq,
23290 */
23291 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
23292 if (istat & FIFO_NOT_EMPTY) {
23293- while((a = h->access.command_completed(h))) {
23294+ while((a = h->access->command_completed(h))) {
23295 a1 = a; a &= ~3;
23296 if ((c = h->cmpQ) == NULL)
23297 {
23298@@ -1449,11 +1451,11 @@ static int sendcmd(
23299 /*
23300 * Disable interrupt
23301 */
23302- info_p->access.set_intr_mask(info_p, 0);
23303+ info_p->access->set_intr_mask(info_p, 0);
23304 /* Make sure there is room in the command FIFO */
23305 /* Actually it should be completely empty at this time. */
23306 for (i = 200000; i > 0; i--) {
23307- temp = info_p->access.fifo_full(info_p);
23308+ temp = info_p->access->fifo_full(info_p);
23309 if (temp != 0) {
23310 break;
23311 }
23312@@ -1466,7 +1468,7 @@ DBG(
23313 /*
23314 * Send the cmd
23315 */
23316- info_p->access.submit_command(info_p, c);
23317+ info_p->access->submit_command(info_p, c);
23318 complete = pollcomplete(ctlr);
23319
23320 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
23321@@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t
23322 * we check the new geometry. Then turn interrupts back on when
23323 * we're done.
23324 */
23325- host->access.set_intr_mask(host, 0);
23326+ host->access->set_intr_mask(host, 0);
23327 getgeometry(ctlr);
23328- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
23329+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
23330
23331 for(i=0; i<NWD; i++) {
23332 struct gendisk *disk = ida_gendisk[ctlr][i];
23333@@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
23334 /* Wait (up to 2 seconds) for a command to complete */
23335
23336 for (i = 200000; i > 0; i--) {
23337- done = hba[ctlr]->access.command_completed(hba[ctlr]);
23338+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
23339 if (done == 0) {
23340 udelay(10); /* a short fixed delay */
23341 } else
23342diff -urNp linux-3.0.4/drivers/block/cpqarray.h linux-3.0.4/drivers/block/cpqarray.h
23343--- linux-3.0.4/drivers/block/cpqarray.h 2011-07-21 22:17:23.000000000 -0400
23344+++ linux-3.0.4/drivers/block/cpqarray.h 2011-08-23 21:47:55.000000000 -0400
23345@@ -99,7 +99,7 @@ struct ctlr_info {
23346 drv_info_t drv[NWD];
23347 struct proc_dir_entry *proc;
23348
23349- struct access_method access;
23350+ struct access_method *access;
23351
23352 cmdlist_t *reqQ;
23353 cmdlist_t *cmpQ;
23354diff -urNp linux-3.0.4/drivers/block/DAC960.c linux-3.0.4/drivers/block/DAC960.c
23355--- linux-3.0.4/drivers/block/DAC960.c 2011-07-21 22:17:23.000000000 -0400
23356+++ linux-3.0.4/drivers/block/DAC960.c 2011-08-23 21:48:14.000000000 -0400
23357@@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfigur
23358 unsigned long flags;
23359 int Channel, TargetID;
23360
23361+ pax_track_stack();
23362+
23363 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
23364 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
23365 sizeof(DAC960_SCSI_Inquiry_T) +
23366diff -urNp linux-3.0.4/drivers/block/drbd/drbd_int.h linux-3.0.4/drivers/block/drbd/drbd_int.h
23367--- linux-3.0.4/drivers/block/drbd/drbd_int.h 2011-07-21 22:17:23.000000000 -0400
23368+++ linux-3.0.4/drivers/block/drbd/drbd_int.h 2011-08-23 21:47:55.000000000 -0400
23369@@ -737,7 +737,7 @@ struct drbd_request;
23370 struct drbd_epoch {
23371 struct list_head list;
23372 unsigned int barrier_nr;
23373- atomic_t epoch_size; /* increased on every request added. */
23374+ atomic_unchecked_t epoch_size; /* increased on every request added. */
23375 atomic_t active; /* increased on every req. added, and dec on every finished. */
23376 unsigned long flags;
23377 };
23378@@ -1109,7 +1109,7 @@ struct drbd_conf {
23379 void *int_dig_in;
23380 void *int_dig_vv;
23381 wait_queue_head_t seq_wait;
23382- atomic_t packet_seq;
23383+ atomic_unchecked_t packet_seq;
23384 unsigned int peer_seq;
23385 spinlock_t peer_seq_lock;
23386 unsigned int minor;
23387diff -urNp linux-3.0.4/drivers/block/drbd/drbd_main.c linux-3.0.4/drivers/block/drbd/drbd_main.c
23388--- linux-3.0.4/drivers/block/drbd/drbd_main.c 2011-07-21 22:17:23.000000000 -0400
23389+++ linux-3.0.4/drivers/block/drbd/drbd_main.c 2011-08-23 21:47:55.000000000 -0400
23390@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_co
23391 p.sector = sector;
23392 p.block_id = block_id;
23393 p.blksize = blksize;
23394- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
23395+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
23396
23397 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
23398 return false;
23399@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *m
23400 p.sector = cpu_to_be64(req->sector);
23401 p.block_id = (unsigned long)req;
23402 p.seq_num = cpu_to_be32(req->seq_num =
23403- atomic_add_return(1, &mdev->packet_seq));
23404+ atomic_add_return_unchecked(1, &mdev->packet_seq));
23405
23406 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
23407
23408@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_
23409 atomic_set(&mdev->unacked_cnt, 0);
23410 atomic_set(&mdev->local_cnt, 0);
23411 atomic_set(&mdev->net_cnt, 0);
23412- atomic_set(&mdev->packet_seq, 0);
23413+ atomic_set_unchecked(&mdev->packet_seq, 0);
23414 atomic_set(&mdev->pp_in_use, 0);
23415 atomic_set(&mdev->pp_in_use_by_net, 0);
23416 atomic_set(&mdev->rs_sect_in, 0);
23417@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf
23418 mdev->receiver.t_state);
23419
23420 /* no need to lock it, I'm the only thread alive */
23421- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
23422- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
23423+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
23424+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
23425 mdev->al_writ_cnt =
23426 mdev->bm_writ_cnt =
23427 mdev->read_cnt =
23428diff -urNp linux-3.0.4/drivers/block/drbd/drbd_nl.c linux-3.0.4/drivers/block/drbd/drbd_nl.c
23429--- linux-3.0.4/drivers/block/drbd/drbd_nl.c 2011-07-21 22:17:23.000000000 -0400
23430+++ linux-3.0.4/drivers/block/drbd/drbd_nl.c 2011-08-23 21:47:55.000000000 -0400
23431@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(stru
23432 module_put(THIS_MODULE);
23433 }
23434
23435-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23436+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23437
23438 static unsigned short *
23439 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
23440@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *
23441 cn_reply->id.idx = CN_IDX_DRBD;
23442 cn_reply->id.val = CN_VAL_DRBD;
23443
23444- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23445+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23446 cn_reply->ack = 0; /* not used here. */
23447 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23448 (int)((char *)tl - (char *)reply->tag_list);
23449@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_co
23450 cn_reply->id.idx = CN_IDX_DRBD;
23451 cn_reply->id.val = CN_VAL_DRBD;
23452
23453- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23454+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23455 cn_reply->ack = 0; /* not used here. */
23456 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23457 (int)((char *)tl - (char *)reply->tag_list);
23458@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mde
23459 cn_reply->id.idx = CN_IDX_DRBD;
23460 cn_reply->id.val = CN_VAL_DRBD;
23461
23462- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
23463+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
23464 cn_reply->ack = 0; // not used here.
23465 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23466 (int)((char*)tl - (char*)reply->tag_list);
23467@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drb
23468 cn_reply->id.idx = CN_IDX_DRBD;
23469 cn_reply->id.val = CN_VAL_DRBD;
23470
23471- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23472+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23473 cn_reply->ack = 0; /* not used here. */
23474 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23475 (int)((char *)tl - (char *)reply->tag_list);
23476diff -urNp linux-3.0.4/drivers/block/drbd/drbd_receiver.c linux-3.0.4/drivers/block/drbd/drbd_receiver.c
23477--- linux-3.0.4/drivers/block/drbd/drbd_receiver.c 2011-07-21 22:17:23.000000000 -0400
23478+++ linux-3.0.4/drivers/block/drbd/drbd_receiver.c 2011-08-23 21:47:55.000000000 -0400
23479@@ -894,7 +894,7 @@ retry:
23480 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
23481 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
23482
23483- atomic_set(&mdev->packet_seq, 0);
23484+ atomic_set_unchecked(&mdev->packet_seq, 0);
23485 mdev->peer_seq = 0;
23486
23487 drbd_thread_start(&mdev->asender);
23488@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish
23489 do {
23490 next_epoch = NULL;
23491
23492- epoch_size = atomic_read(&epoch->epoch_size);
23493+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
23494
23495 switch (ev & ~EV_CLEANUP) {
23496 case EV_PUT:
23497@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish
23498 rv = FE_DESTROYED;
23499 } else {
23500 epoch->flags = 0;
23501- atomic_set(&epoch->epoch_size, 0);
23502+ atomic_set_unchecked(&epoch->epoch_size, 0);
23503 /* atomic_set(&epoch->active, 0); is already zero */
23504 if (rv == FE_STILL_LIVE)
23505 rv = FE_RECYCLED;
23506@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_c
23507 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
23508 drbd_flush(mdev);
23509
23510- if (atomic_read(&mdev->current_epoch->epoch_size)) {
23511+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
23512 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
23513 if (epoch)
23514 break;
23515 }
23516
23517 epoch = mdev->current_epoch;
23518- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
23519+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
23520
23521 D_ASSERT(atomic_read(&epoch->active) == 0);
23522 D_ASSERT(epoch->flags == 0);
23523@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_c
23524 }
23525
23526 epoch->flags = 0;
23527- atomic_set(&epoch->epoch_size, 0);
23528+ atomic_set_unchecked(&epoch->epoch_size, 0);
23529 atomic_set(&epoch->active, 0);
23530
23531 spin_lock(&mdev->epoch_lock);
23532- if (atomic_read(&mdev->current_epoch->epoch_size)) {
23533+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
23534 list_add(&epoch->list, &mdev->current_epoch->list);
23535 mdev->current_epoch = epoch;
23536 mdev->epochs++;
23537@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf
23538 spin_unlock(&mdev->peer_seq_lock);
23539
23540 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
23541- atomic_inc(&mdev->current_epoch->epoch_size);
23542+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
23543 return drbd_drain_block(mdev, data_size);
23544 }
23545
23546@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf
23547
23548 spin_lock(&mdev->epoch_lock);
23549 e->epoch = mdev->current_epoch;
23550- atomic_inc(&e->epoch->epoch_size);
23551+ atomic_inc_unchecked(&e->epoch->epoch_size);
23552 atomic_inc(&e->epoch->active);
23553 spin_unlock(&mdev->epoch_lock);
23554
23555@@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_
23556 D_ASSERT(list_empty(&mdev->done_ee));
23557
23558 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
23559- atomic_set(&mdev->current_epoch->epoch_size, 0);
23560+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
23561 D_ASSERT(list_empty(&mdev->current_epoch->list));
23562 }
23563
23564diff -urNp linux-3.0.4/drivers/block/nbd.c linux-3.0.4/drivers/block/nbd.c
23565--- linux-3.0.4/drivers/block/nbd.c 2011-07-21 22:17:23.000000000 -0400
23566+++ linux-3.0.4/drivers/block/nbd.c 2011-08-23 21:48:14.000000000 -0400
23567@@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *
23568 struct kvec iov;
23569 sigset_t blocked, oldset;
23570
23571+ pax_track_stack();
23572+
23573 if (unlikely(!sock)) {
23574 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
23575 lo->disk->disk_name, (send ? "send" : "recv"));
23576@@ -572,6 +574,8 @@ static void do_nbd_request(struct reques
23577 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
23578 unsigned int cmd, unsigned long arg)
23579 {
23580+ pax_track_stack();
23581+
23582 switch (cmd) {
23583 case NBD_DISCONNECT: {
23584 struct request sreq;
23585diff -urNp linux-3.0.4/drivers/char/agp/frontend.c linux-3.0.4/drivers/char/agp/frontend.c
23586--- linux-3.0.4/drivers/char/agp/frontend.c 2011-07-21 22:17:23.000000000 -0400
23587+++ linux-3.0.4/drivers/char/agp/frontend.c 2011-08-23 21:47:55.000000000 -0400
23588@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct ag
23589 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
23590 return -EFAULT;
23591
23592- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
23593+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
23594 return -EFAULT;
23595
23596 client = agp_find_client_by_pid(reserve.pid);
23597diff -urNp linux-3.0.4/drivers/char/briq_panel.c linux-3.0.4/drivers/char/briq_panel.c
23598--- linux-3.0.4/drivers/char/briq_panel.c 2011-07-21 22:17:23.000000000 -0400
23599+++ linux-3.0.4/drivers/char/briq_panel.c 2011-08-23 21:48:14.000000000 -0400
23600@@ -9,6 +9,7 @@
23601 #include <linux/types.h>
23602 #include <linux/errno.h>
23603 #include <linux/tty.h>
23604+#include <linux/mutex.h>
23605 #include <linux/timer.h>
23606 #include <linux/kernel.h>
23607 #include <linux/wait.h>
23608@@ -34,6 +35,7 @@ static int vfd_is_open;
23609 static unsigned char vfd[40];
23610 static int vfd_cursor;
23611 static unsigned char ledpb, led;
23612+static DEFINE_MUTEX(vfd_mutex);
23613
23614 static void update_vfd(void)
23615 {
23616@@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct f
23617 if (!vfd_is_open)
23618 return -EBUSY;
23619
23620+ mutex_lock(&vfd_mutex);
23621 for (;;) {
23622 char c;
23623 if (!indx)
23624 break;
23625- if (get_user(c, buf))
23626+ if (get_user(c, buf)) {
23627+ mutex_unlock(&vfd_mutex);
23628 return -EFAULT;
23629+ }
23630 if (esc) {
23631 set_led(c);
23632 esc = 0;
23633@@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct f
23634 buf++;
23635 }
23636 update_vfd();
23637+ mutex_unlock(&vfd_mutex);
23638
23639 return len;
23640 }
23641diff -urNp linux-3.0.4/drivers/char/genrtc.c linux-3.0.4/drivers/char/genrtc.c
23642--- linux-3.0.4/drivers/char/genrtc.c 2011-07-21 22:17:23.000000000 -0400
23643+++ linux-3.0.4/drivers/char/genrtc.c 2011-08-23 21:48:14.000000000 -0400
23644@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *fi
23645 switch (cmd) {
23646
23647 case RTC_PLL_GET:
23648+ memset(&pll, 0, sizeof(pll));
23649 if (get_rtc_pll(&pll))
23650 return -EINVAL;
23651 else
23652diff -urNp linux-3.0.4/drivers/char/hpet.c linux-3.0.4/drivers/char/hpet.c
23653--- linux-3.0.4/drivers/char/hpet.c 2011-07-21 22:17:23.000000000 -0400
23654+++ linux-3.0.4/drivers/char/hpet.c 2011-08-23 21:47:55.000000000 -0400
23655@@ -572,7 +572,7 @@ static inline unsigned long hpet_time_di
23656 }
23657
23658 static int
23659-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
23660+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
23661 struct hpet_info *info)
23662 {
23663 struct hpet_timer __iomem *timer;
23664diff -urNp linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c
23665--- linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c 2011-07-21 22:17:23.000000000 -0400
23666+++ linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c 2011-08-23 21:48:14.000000000 -0400
23667@@ -415,7 +415,7 @@ struct ipmi_smi {
23668 struct proc_dir_entry *proc_dir;
23669 char proc_dir_name[10];
23670
23671- atomic_t stats[IPMI_NUM_STATS];
23672+ atomic_unchecked_t stats[IPMI_NUM_STATS];
23673
23674 /*
23675 * run_to_completion duplicate of smb_info, smi_info
23676@@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
23677
23678
23679 #define ipmi_inc_stat(intf, stat) \
23680- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
23681+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
23682 #define ipmi_get_stat(intf, stat) \
23683- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
23684+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
23685
23686 static int is_lan_addr(struct ipmi_addr *addr)
23687 {
23688@@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
23689 INIT_LIST_HEAD(&intf->cmd_rcvrs);
23690 init_waitqueue_head(&intf->waitq);
23691 for (i = 0; i < IPMI_NUM_STATS; i++)
23692- atomic_set(&intf->stats[i], 0);
23693+ atomic_set_unchecked(&intf->stats[i], 0);
23694
23695 intf->proc_dir = NULL;
23696
23697@@ -4220,6 +4220,8 @@ static void send_panic_events(char *str)
23698 struct ipmi_smi_msg smi_msg;
23699 struct ipmi_recv_msg recv_msg;
23700
23701+ pax_track_stack();
23702+
23703 si = (struct ipmi_system_interface_addr *) &addr;
23704 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
23705 si->channel = IPMI_BMC_CHANNEL;
23706diff -urNp linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c
23707--- linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c 2011-07-21 22:17:23.000000000 -0400
23708+++ linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c 2011-08-23 21:47:55.000000000 -0400
23709@@ -277,7 +277,7 @@ struct smi_info {
23710 unsigned char slave_addr;
23711
23712 /* Counters and things for the proc filesystem. */
23713- atomic_t stats[SI_NUM_STATS];
23714+ atomic_unchecked_t stats[SI_NUM_STATS];
23715
23716 struct task_struct *thread;
23717
23718@@ -286,9 +286,9 @@ struct smi_info {
23719 };
23720
23721 #define smi_inc_stat(smi, stat) \
23722- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
23723+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
23724 #define smi_get_stat(smi, stat) \
23725- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
23726+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
23727
23728 #define SI_MAX_PARMS 4
23729
23730@@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info
23731 atomic_set(&new_smi->req_events, 0);
23732 new_smi->run_to_completion = 0;
23733 for (i = 0; i < SI_NUM_STATS; i++)
23734- atomic_set(&new_smi->stats[i], 0);
23735+ atomic_set_unchecked(&new_smi->stats[i], 0);
23736
23737 new_smi->interrupt_disabled = 1;
23738 atomic_set(&new_smi->stop_operation, 0);
23739diff -urNp linux-3.0.4/drivers/char/Kconfig linux-3.0.4/drivers/char/Kconfig
23740--- linux-3.0.4/drivers/char/Kconfig 2011-07-21 22:17:23.000000000 -0400
23741+++ linux-3.0.4/drivers/char/Kconfig 2011-08-23 21:48:14.000000000 -0400
23742@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
23743
23744 config DEVKMEM
23745 bool "/dev/kmem virtual device support"
23746- default y
23747+ default n
23748+ depends on !GRKERNSEC_KMEM
23749 help
23750 Say Y here if you want to support the /dev/kmem device. The
23751 /dev/kmem device is rarely used, but can be used for certain
23752@@ -596,6 +597,7 @@ config DEVPORT
23753 bool
23754 depends on !M68K
23755 depends on ISA || PCI
23756+ depends on !GRKERNSEC_KMEM
23757 default y
23758
23759 source "drivers/s390/char/Kconfig"
23760diff -urNp linux-3.0.4/drivers/char/mem.c linux-3.0.4/drivers/char/mem.c
23761--- linux-3.0.4/drivers/char/mem.c 2011-07-21 22:17:23.000000000 -0400
23762+++ linux-3.0.4/drivers/char/mem.c 2011-08-23 21:48:14.000000000 -0400
23763@@ -18,6 +18,7 @@
23764 #include <linux/raw.h>
23765 #include <linux/tty.h>
23766 #include <linux/capability.h>
23767+#include <linux/security.h>
23768 #include <linux/ptrace.h>
23769 #include <linux/device.h>
23770 #include <linux/highmem.h>
23771@@ -34,6 +35,10 @@
23772 # include <linux/efi.h>
23773 #endif
23774
23775+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
23776+extern struct file_operations grsec_fops;
23777+#endif
23778+
23779 static inline unsigned long size_inside_page(unsigned long start,
23780 unsigned long size)
23781 {
23782@@ -65,9 +70,13 @@ static inline int range_is_allowed(unsig
23783
23784 while (cursor < to) {
23785 if (!devmem_is_allowed(pfn)) {
23786+#ifdef CONFIG_GRKERNSEC_KMEM
23787+ gr_handle_mem_readwrite(from, to);
23788+#else
23789 printk(KERN_INFO
23790 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23791 current->comm, from, to);
23792+#endif
23793 return 0;
23794 }
23795 cursor += PAGE_SIZE;
23796@@ -75,6 +84,11 @@ static inline int range_is_allowed(unsig
23797 }
23798 return 1;
23799 }
23800+#elif defined(CONFIG_GRKERNSEC_KMEM)
23801+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23802+{
23803+ return 0;
23804+}
23805 #else
23806 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23807 {
23808@@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *fil
23809
23810 while (count > 0) {
23811 unsigned long remaining;
23812+ char *temp;
23813
23814 sz = size_inside_page(p, count);
23815
23816@@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *fil
23817 if (!ptr)
23818 return -EFAULT;
23819
23820- remaining = copy_to_user(buf, ptr, sz);
23821+#ifdef CONFIG_PAX_USERCOPY
23822+ temp = kmalloc(sz, GFP_KERNEL);
23823+ if (!temp) {
23824+ unxlate_dev_mem_ptr(p, ptr);
23825+ return -ENOMEM;
23826+ }
23827+ memcpy(temp, ptr, sz);
23828+#else
23829+ temp = ptr;
23830+#endif
23831+
23832+ remaining = copy_to_user(buf, temp, sz);
23833+
23834+#ifdef CONFIG_PAX_USERCOPY
23835+ kfree(temp);
23836+#endif
23837+
23838 unxlate_dev_mem_ptr(p, ptr);
23839 if (remaining)
23840 return -EFAULT;
23841@@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *fi
23842 size_t count, loff_t *ppos)
23843 {
23844 unsigned long p = *ppos;
23845- ssize_t low_count, read, sz;
23846+ ssize_t low_count, read, sz, err = 0;
23847 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
23848- int err = 0;
23849
23850 read = 0;
23851 if (p < (unsigned long) high_memory) {
23852@@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *fi
23853 }
23854 #endif
23855 while (low_count > 0) {
23856+ char *temp;
23857+
23858 sz = size_inside_page(p, low_count);
23859
23860 /*
23861@@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *fi
23862 */
23863 kbuf = xlate_dev_kmem_ptr((char *)p);
23864
23865- if (copy_to_user(buf, kbuf, sz))
23866+#ifdef CONFIG_PAX_USERCOPY
23867+ temp = kmalloc(sz, GFP_KERNEL);
23868+ if (!temp)
23869+ return -ENOMEM;
23870+ memcpy(temp, kbuf, sz);
23871+#else
23872+ temp = kbuf;
23873+#endif
23874+
23875+ err = copy_to_user(buf, temp, sz);
23876+
23877+#ifdef CONFIG_PAX_USERCOPY
23878+ kfree(temp);
23879+#endif
23880+
23881+ if (err)
23882 return -EFAULT;
23883 buf += sz;
23884 p += sz;
23885@@ -866,6 +913,9 @@ static const struct memdev {
23886 #ifdef CONFIG_CRASH_DUMP
23887 [12] = { "oldmem", 0, &oldmem_fops, NULL },
23888 #endif
23889+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
23890+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
23891+#endif
23892 };
23893
23894 static int memory_open(struct inode *inode, struct file *filp)
23895diff -urNp linux-3.0.4/drivers/char/nvram.c linux-3.0.4/drivers/char/nvram.c
23896--- linux-3.0.4/drivers/char/nvram.c 2011-07-21 22:17:23.000000000 -0400
23897+++ linux-3.0.4/drivers/char/nvram.c 2011-08-23 21:47:55.000000000 -0400
23898@@ -246,7 +246,7 @@ static ssize_t nvram_read(struct file *f
23899
23900 spin_unlock_irq(&rtc_lock);
23901
23902- if (copy_to_user(buf, contents, tmp - contents))
23903+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
23904 return -EFAULT;
23905
23906 *ppos = i;
23907diff -urNp linux-3.0.4/drivers/char/random.c linux-3.0.4/drivers/char/random.c
23908--- linux-3.0.4/drivers/char/random.c 2011-09-02 18:11:21.000000000 -0400
23909+++ linux-3.0.4/drivers/char/random.c 2011-08-23 21:48:14.000000000 -0400
23910@@ -261,8 +261,13 @@
23911 /*
23912 * Configuration information
23913 */
23914+#ifdef CONFIG_GRKERNSEC_RANDNET
23915+#define INPUT_POOL_WORDS 512
23916+#define OUTPUT_POOL_WORDS 128
23917+#else
23918 #define INPUT_POOL_WORDS 128
23919 #define OUTPUT_POOL_WORDS 32
23920+#endif
23921 #define SEC_XFER_SIZE 512
23922 #define EXTRACT_SIZE 10
23923
23924@@ -300,10 +305,17 @@ static struct poolinfo {
23925 int poolwords;
23926 int tap1, tap2, tap3, tap4, tap5;
23927 } poolinfo_table[] = {
23928+#ifdef CONFIG_GRKERNSEC_RANDNET
23929+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
23930+ { 512, 411, 308, 208, 104, 1 },
23931+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
23932+ { 128, 103, 76, 51, 25, 1 },
23933+#else
23934 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
23935 { 128, 103, 76, 51, 25, 1 },
23936 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
23937 { 32, 26, 20, 14, 7, 1 },
23938+#endif
23939 #if 0
23940 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
23941 { 2048, 1638, 1231, 819, 411, 1 },
23942@@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(stru
23943
23944 extract_buf(r, tmp);
23945 i = min_t(int, nbytes, EXTRACT_SIZE);
23946- if (copy_to_user(buf, tmp, i)) {
23947+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
23948 ret = -EFAULT;
23949 break;
23950 }
23951@@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
23952 #include <linux/sysctl.h>
23953
23954 static int min_read_thresh = 8, min_write_thresh;
23955-static int max_read_thresh = INPUT_POOL_WORDS * 32;
23956+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
23957 static int max_write_thresh = INPUT_POOL_WORDS * 32;
23958 static char sysctl_bootid[16];
23959
23960diff -urNp linux-3.0.4/drivers/char/sonypi.c linux-3.0.4/drivers/char/sonypi.c
23961--- linux-3.0.4/drivers/char/sonypi.c 2011-07-21 22:17:23.000000000 -0400
23962+++ linux-3.0.4/drivers/char/sonypi.c 2011-08-23 21:47:55.000000000 -0400
23963@@ -55,6 +55,7 @@
23964 #include <asm/uaccess.h>
23965 #include <asm/io.h>
23966 #include <asm/system.h>
23967+#include <asm/local.h>
23968
23969 #include <linux/sonypi.h>
23970
23971@@ -491,7 +492,7 @@ static struct sonypi_device {
23972 spinlock_t fifo_lock;
23973 wait_queue_head_t fifo_proc_list;
23974 struct fasync_struct *fifo_async;
23975- int open_count;
23976+ local_t open_count;
23977 int model;
23978 struct input_dev *input_jog_dev;
23979 struct input_dev *input_key_dev;
23980@@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, st
23981 static int sonypi_misc_release(struct inode *inode, struct file *file)
23982 {
23983 mutex_lock(&sonypi_device.lock);
23984- sonypi_device.open_count--;
23985+ local_dec(&sonypi_device.open_count);
23986 mutex_unlock(&sonypi_device.lock);
23987 return 0;
23988 }
23989@@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode
23990 {
23991 mutex_lock(&sonypi_device.lock);
23992 /* Flush input queue on first open */
23993- if (!sonypi_device.open_count)
23994+ if (!local_read(&sonypi_device.open_count))
23995 kfifo_reset(&sonypi_device.fifo);
23996- sonypi_device.open_count++;
23997+ local_inc(&sonypi_device.open_count);
23998 mutex_unlock(&sonypi_device.lock);
23999
24000 return 0;
24001diff -urNp linux-3.0.4/drivers/char/tpm/tpm_bios.c linux-3.0.4/drivers/char/tpm/tpm_bios.c
24002--- linux-3.0.4/drivers/char/tpm/tpm_bios.c 2011-07-21 22:17:23.000000000 -0400
24003+++ linux-3.0.4/drivers/char/tpm/tpm_bios.c 2011-08-23 21:47:55.000000000 -0400
24004@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
24005 event = addr;
24006
24007 if ((event->event_type == 0 && event->event_size == 0) ||
24008- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
24009+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
24010 return NULL;
24011
24012 return addr;
24013@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
24014 return NULL;
24015
24016 if ((event->event_type == 0 && event->event_size == 0) ||
24017- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
24018+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
24019 return NULL;
24020
24021 (*pos)++;
24022@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
24023 int i;
24024
24025 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
24026- seq_putc(m, data[i]);
24027+ if (!seq_putc(m, data[i]))
24028+ return -EFAULT;
24029
24030 return 0;
24031 }
24032@@ -410,6 +411,11 @@ static int read_log(struct tpm_bios_log
24033 log->bios_event_log_end = log->bios_event_log + len;
24034
24035 virt = acpi_os_map_memory(start, len);
24036+ if (!virt) {
24037+ kfree(log->bios_event_log);
24038+ log->bios_event_log = NULL;
24039+ return -EFAULT;
24040+ }
24041
24042 memcpy(log->bios_event_log, virt, len);
24043
24044diff -urNp linux-3.0.4/drivers/char/tpm/tpm.c linux-3.0.4/drivers/char/tpm/tpm.c
24045--- linux-3.0.4/drivers/char/tpm/tpm.c 2011-07-21 22:17:23.000000000 -0400
24046+++ linux-3.0.4/drivers/char/tpm/tpm.c 2011-08-23 21:48:14.000000000 -0400
24047@@ -411,7 +411,7 @@ static ssize_t tpm_transmit(struct tpm_c
24048 chip->vendor.req_complete_val)
24049 goto out_recv;
24050
24051- if ((status == chip->vendor.req_canceled)) {
24052+ if (status == chip->vendor.req_canceled) {
24053 dev_err(chip->dev, "Operation Canceled\n");
24054 rc = -ECANCELED;
24055 goto out;
24056@@ -844,6 +844,8 @@ ssize_t tpm_show_pubek(struct device *de
24057
24058 struct tpm_chip *chip = dev_get_drvdata(dev);
24059
24060+ pax_track_stack();
24061+
24062 tpm_cmd.header.in = tpm_readpubek_header;
24063 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
24064 "attempting to read the PUBEK");
24065diff -urNp linux-3.0.4/drivers/crypto/hifn_795x.c linux-3.0.4/drivers/crypto/hifn_795x.c
24066--- linux-3.0.4/drivers/crypto/hifn_795x.c 2011-07-21 22:17:23.000000000 -0400
24067+++ linux-3.0.4/drivers/crypto/hifn_795x.c 2011-08-23 21:48:14.000000000 -0400
24068@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
24069 0xCA, 0x34, 0x2B, 0x2E};
24070 struct scatterlist sg;
24071
24072+ pax_track_stack();
24073+
24074 memset(src, 0, sizeof(src));
24075 memset(ctx.key, 0, sizeof(ctx.key));
24076
24077diff -urNp linux-3.0.4/drivers/crypto/padlock-aes.c linux-3.0.4/drivers/crypto/padlock-aes.c
24078--- linux-3.0.4/drivers/crypto/padlock-aes.c 2011-07-21 22:17:23.000000000 -0400
24079+++ linux-3.0.4/drivers/crypto/padlock-aes.c 2011-08-23 21:48:14.000000000 -0400
24080@@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm
24081 struct crypto_aes_ctx gen_aes;
24082 int cpu;
24083
24084+ pax_track_stack();
24085+
24086 if (key_len % 8) {
24087 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
24088 return -EINVAL;
24089diff -urNp linux-3.0.4/drivers/edac/edac_pci_sysfs.c linux-3.0.4/drivers/edac/edac_pci_sysfs.c
24090--- linux-3.0.4/drivers/edac/edac_pci_sysfs.c 2011-07-21 22:17:23.000000000 -0400
24091+++ linux-3.0.4/drivers/edac/edac_pci_sysfs.c 2011-08-23 21:47:55.000000000 -0400
24092@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log
24093 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
24094 static int edac_pci_poll_msec = 1000; /* one second workq period */
24095
24096-static atomic_t pci_parity_count = ATOMIC_INIT(0);
24097-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
24098+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
24099+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
24100
24101 static struct kobject *edac_pci_top_main_kobj;
24102 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
24103@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(str
24104 edac_printk(KERN_CRIT, EDAC_PCI,
24105 "Signaled System Error on %s\n",
24106 pci_name(dev));
24107- atomic_inc(&pci_nonparity_count);
24108+ atomic_inc_unchecked(&pci_nonparity_count);
24109 }
24110
24111 if (status & (PCI_STATUS_PARITY)) {
24112@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(str
24113 "Master Data Parity Error on %s\n",
24114 pci_name(dev));
24115
24116- atomic_inc(&pci_parity_count);
24117+ atomic_inc_unchecked(&pci_parity_count);
24118 }
24119
24120 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24121@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(str
24122 "Detected Parity Error on %s\n",
24123 pci_name(dev));
24124
24125- atomic_inc(&pci_parity_count);
24126+ atomic_inc_unchecked(&pci_parity_count);
24127 }
24128 }
24129
24130@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(str
24131 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
24132 "Signaled System Error on %s\n",
24133 pci_name(dev));
24134- atomic_inc(&pci_nonparity_count);
24135+ atomic_inc_unchecked(&pci_nonparity_count);
24136 }
24137
24138 if (status & (PCI_STATUS_PARITY)) {
24139@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(str
24140 "Master Data Parity Error on "
24141 "%s\n", pci_name(dev));
24142
24143- atomic_inc(&pci_parity_count);
24144+ atomic_inc_unchecked(&pci_parity_count);
24145 }
24146
24147 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24148@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(str
24149 "Detected Parity Error on %s\n",
24150 pci_name(dev));
24151
24152- atomic_inc(&pci_parity_count);
24153+ atomic_inc_unchecked(&pci_parity_count);
24154 }
24155 }
24156 }
24157@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
24158 if (!check_pci_errors)
24159 return;
24160
24161- before_count = atomic_read(&pci_parity_count);
24162+ before_count = atomic_read_unchecked(&pci_parity_count);
24163
24164 /* scan all PCI devices looking for a Parity Error on devices and
24165 * bridges.
24166@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
24167 /* Only if operator has selected panic on PCI Error */
24168 if (edac_pci_get_panic_on_pe()) {
24169 /* If the count is different 'after' from 'before' */
24170- if (before_count != atomic_read(&pci_parity_count))
24171+ if (before_count != atomic_read_unchecked(&pci_parity_count))
24172 panic("EDAC: PCI Parity Error");
24173 }
24174 }
24175diff -urNp linux-3.0.4/drivers/edac/mce_amd.h linux-3.0.4/drivers/edac/mce_amd.h
24176--- linux-3.0.4/drivers/edac/mce_amd.h 2011-07-21 22:17:23.000000000 -0400
24177+++ linux-3.0.4/drivers/edac/mce_amd.h 2011-08-23 21:47:55.000000000 -0400
24178@@ -83,7 +83,7 @@ struct amd_decoder_ops {
24179 bool (*dc_mce)(u16, u8);
24180 bool (*ic_mce)(u16, u8);
24181 bool (*nb_mce)(u16, u8);
24182-};
24183+} __no_const;
24184
24185 void amd_report_gart_errors(bool);
24186 void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
24187diff -urNp linux-3.0.4/drivers/firewire/core-card.c linux-3.0.4/drivers/firewire/core-card.c
24188--- linux-3.0.4/drivers/firewire/core-card.c 2011-07-21 22:17:23.000000000 -0400
24189+++ linux-3.0.4/drivers/firewire/core-card.c 2011-08-23 21:47:55.000000000 -0400
24190@@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
24191
24192 void fw_core_remove_card(struct fw_card *card)
24193 {
24194- struct fw_card_driver dummy_driver = dummy_driver_template;
24195+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
24196
24197 card->driver->update_phy_reg(card, 4,
24198 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
24199diff -urNp linux-3.0.4/drivers/firewire/core-cdev.c linux-3.0.4/drivers/firewire/core-cdev.c
24200--- linux-3.0.4/drivers/firewire/core-cdev.c 2011-09-02 18:11:21.000000000 -0400
24201+++ linux-3.0.4/drivers/firewire/core-cdev.c 2011-08-23 21:47:55.000000000 -0400
24202@@ -1313,8 +1313,7 @@ static int init_iso_resource(struct clie
24203 int ret;
24204
24205 if ((request->channels == 0 && request->bandwidth == 0) ||
24206- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
24207- request->bandwidth < 0)
24208+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
24209 return -EINVAL;
24210
24211 r = kmalloc(sizeof(*r), GFP_KERNEL);
24212diff -urNp linux-3.0.4/drivers/firewire/core.h linux-3.0.4/drivers/firewire/core.h
24213--- linux-3.0.4/drivers/firewire/core.h 2011-07-21 22:17:23.000000000 -0400
24214+++ linux-3.0.4/drivers/firewire/core.h 2011-08-23 21:47:55.000000000 -0400
24215@@ -101,6 +101,7 @@ struct fw_card_driver {
24216
24217 int (*stop_iso)(struct fw_iso_context *ctx);
24218 };
24219+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
24220
24221 void fw_card_initialize(struct fw_card *card,
24222 const struct fw_card_driver *driver, struct device *device);
24223diff -urNp linux-3.0.4/drivers/firewire/core-transaction.c linux-3.0.4/drivers/firewire/core-transaction.c
24224--- linux-3.0.4/drivers/firewire/core-transaction.c 2011-07-21 22:17:23.000000000 -0400
24225+++ linux-3.0.4/drivers/firewire/core-transaction.c 2011-08-23 21:48:14.000000000 -0400
24226@@ -37,6 +37,7 @@
24227 #include <linux/timer.h>
24228 #include <linux/types.h>
24229 #include <linux/workqueue.h>
24230+#include <linux/sched.h>
24231
24232 #include <asm/byteorder.h>
24233
24234@@ -422,6 +423,8 @@ int fw_run_transaction(struct fw_card *c
24235 struct transaction_callback_data d;
24236 struct fw_transaction t;
24237
24238+ pax_track_stack();
24239+
24240 init_timer_on_stack(&t.split_timeout_timer);
24241 init_completion(&d.done);
24242 d.payload = payload;
24243diff -urNp linux-3.0.4/drivers/firmware/dmi_scan.c linux-3.0.4/drivers/firmware/dmi_scan.c
24244--- linux-3.0.4/drivers/firmware/dmi_scan.c 2011-07-21 22:17:23.000000000 -0400
24245+++ linux-3.0.4/drivers/firmware/dmi_scan.c 2011-08-23 21:47:55.000000000 -0400
24246@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
24247 }
24248 }
24249 else {
24250- /*
24251- * no iounmap() for that ioremap(); it would be a no-op, but
24252- * it's so early in setup that sucker gets confused into doing
24253- * what it shouldn't if we actually call it.
24254- */
24255 p = dmi_ioremap(0xF0000, 0x10000);
24256 if (p == NULL)
24257 goto error;
24258diff -urNp linux-3.0.4/drivers/gpio/vr41xx_giu.c linux-3.0.4/drivers/gpio/vr41xx_giu.c
24259--- linux-3.0.4/drivers/gpio/vr41xx_giu.c 2011-07-21 22:17:23.000000000 -0400
24260+++ linux-3.0.4/drivers/gpio/vr41xx_giu.c 2011-08-23 21:47:55.000000000 -0400
24261@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
24262 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
24263 maskl, pendl, maskh, pendh);
24264
24265- atomic_inc(&irq_err_count);
24266+ atomic_inc_unchecked(&irq_err_count);
24267
24268 return -EINVAL;
24269 }
24270diff -urNp linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c
24271--- linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c 2011-07-21 22:17:23.000000000 -0400
24272+++ linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c 2011-08-23 21:48:14.000000000 -0400
24273@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct d
24274 struct drm_crtc *tmp;
24275 int crtc_mask = 1;
24276
24277- WARN(!crtc, "checking null crtc?\n");
24278+ BUG_ON(!crtc);
24279
24280 dev = crtc->dev;
24281
24282@@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm
24283 struct drm_encoder *encoder;
24284 bool ret = true;
24285
24286+ pax_track_stack();
24287+
24288 crtc->enabled = drm_helper_crtc_in_use(crtc);
24289 if (!crtc->enabled)
24290 return true;
24291diff -urNp linux-3.0.4/drivers/gpu/drm/drm_drv.c linux-3.0.4/drivers/gpu/drm/drm_drv.c
24292--- linux-3.0.4/drivers/gpu/drm/drm_drv.c 2011-07-21 22:17:23.000000000 -0400
24293+++ linux-3.0.4/drivers/gpu/drm/drm_drv.c 2011-08-23 21:47:55.000000000 -0400
24294@@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
24295
24296 dev = file_priv->minor->dev;
24297 atomic_inc(&dev->ioctl_count);
24298- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
24299+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
24300 ++file_priv->ioctl_count;
24301
24302 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
24303diff -urNp linux-3.0.4/drivers/gpu/drm/drm_fops.c linux-3.0.4/drivers/gpu/drm/drm_fops.c
24304--- linux-3.0.4/drivers/gpu/drm/drm_fops.c 2011-07-21 22:17:23.000000000 -0400
24305+++ linux-3.0.4/drivers/gpu/drm/drm_fops.c 2011-08-23 21:47:55.000000000 -0400
24306@@ -70,7 +70,7 @@ static int drm_setup(struct drm_device *
24307 }
24308
24309 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
24310- atomic_set(&dev->counts[i], 0);
24311+ atomic_set_unchecked(&dev->counts[i], 0);
24312
24313 dev->sigdata.lock = NULL;
24314
24315@@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct
24316
24317 retcode = drm_open_helper(inode, filp, dev);
24318 if (!retcode) {
24319- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
24320- if (!dev->open_count++)
24321+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
24322+ if (local_inc_return(&dev->open_count) == 1)
24323 retcode = drm_setup(dev);
24324 }
24325 if (!retcode) {
24326@@ -472,7 +472,7 @@ int drm_release(struct inode *inode, str
24327
24328 mutex_lock(&drm_global_mutex);
24329
24330- DRM_DEBUG("open_count = %d\n", dev->open_count);
24331+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
24332
24333 if (dev->driver->preclose)
24334 dev->driver->preclose(dev, file_priv);
24335@@ -484,7 +484,7 @@ int drm_release(struct inode *inode, str
24336 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
24337 task_pid_nr(current),
24338 (long)old_encode_dev(file_priv->minor->device),
24339- dev->open_count);
24340+ local_read(&dev->open_count));
24341
24342 /* if the master has gone away we can't do anything with the lock */
24343 if (file_priv->minor->master)
24344@@ -565,8 +565,8 @@ int drm_release(struct inode *inode, str
24345 * End inline drm_release
24346 */
24347
24348- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
24349- if (!--dev->open_count) {
24350+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
24351+ if (local_dec_and_test(&dev->open_count)) {
24352 if (atomic_read(&dev->ioctl_count)) {
24353 DRM_ERROR("Device busy: %d\n",
24354 atomic_read(&dev->ioctl_count));
24355diff -urNp linux-3.0.4/drivers/gpu/drm/drm_global.c linux-3.0.4/drivers/gpu/drm/drm_global.c
24356--- linux-3.0.4/drivers/gpu/drm/drm_global.c 2011-07-21 22:17:23.000000000 -0400
24357+++ linux-3.0.4/drivers/gpu/drm/drm_global.c 2011-08-23 21:47:55.000000000 -0400
24358@@ -36,7 +36,7 @@
24359 struct drm_global_item {
24360 struct mutex mutex;
24361 void *object;
24362- int refcount;
24363+ atomic_t refcount;
24364 };
24365
24366 static struct drm_global_item glob[DRM_GLOBAL_NUM];
24367@@ -49,7 +49,7 @@ void drm_global_init(void)
24368 struct drm_global_item *item = &glob[i];
24369 mutex_init(&item->mutex);
24370 item->object = NULL;
24371- item->refcount = 0;
24372+ atomic_set(&item->refcount, 0);
24373 }
24374 }
24375
24376@@ -59,7 +59,7 @@ void drm_global_release(void)
24377 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
24378 struct drm_global_item *item = &glob[i];
24379 BUG_ON(item->object != NULL);
24380- BUG_ON(item->refcount != 0);
24381+ BUG_ON(atomic_read(&item->refcount) != 0);
24382 }
24383 }
24384
24385@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_globa
24386 void *object;
24387
24388 mutex_lock(&item->mutex);
24389- if (item->refcount == 0) {
24390+ if (atomic_read(&item->refcount) == 0) {
24391 item->object = kzalloc(ref->size, GFP_KERNEL);
24392 if (unlikely(item->object == NULL)) {
24393 ret = -ENOMEM;
24394@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_globa
24395 goto out_err;
24396
24397 }
24398- ++item->refcount;
24399+ atomic_inc(&item->refcount);
24400 ref->object = item->object;
24401 object = item->object;
24402 mutex_unlock(&item->mutex);
24403@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_gl
24404 struct drm_global_item *item = &glob[ref->global_type];
24405
24406 mutex_lock(&item->mutex);
24407- BUG_ON(item->refcount == 0);
24408+ BUG_ON(atomic_read(&item->refcount) == 0);
24409 BUG_ON(ref->object != item->object);
24410- if (--item->refcount == 0) {
24411+ if (atomic_dec_and_test(&item->refcount)) {
24412 ref->release(ref);
24413 item->object = NULL;
24414 }
24415diff -urNp linux-3.0.4/drivers/gpu/drm/drm_info.c linux-3.0.4/drivers/gpu/drm/drm_info.c
24416--- linux-3.0.4/drivers/gpu/drm/drm_info.c 2011-07-21 22:17:23.000000000 -0400
24417+++ linux-3.0.4/drivers/gpu/drm/drm_info.c 2011-08-23 21:48:14.000000000 -0400
24418@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
24419 struct drm_local_map *map;
24420 struct drm_map_list *r_list;
24421
24422- /* Hardcoded from _DRM_FRAME_BUFFER,
24423- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
24424- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
24425- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
24426+ static const char * const types[] = {
24427+ [_DRM_FRAME_BUFFER] = "FB",
24428+ [_DRM_REGISTERS] = "REG",
24429+ [_DRM_SHM] = "SHM",
24430+ [_DRM_AGP] = "AGP",
24431+ [_DRM_SCATTER_GATHER] = "SG",
24432+ [_DRM_CONSISTENT] = "PCI",
24433+ [_DRM_GEM] = "GEM" };
24434 const char *type;
24435 int i;
24436
24437@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
24438 map = r_list->map;
24439 if (!map)
24440 continue;
24441- if (map->type < 0 || map->type > 5)
24442+ if (map->type >= ARRAY_SIZE(types))
24443 type = "??";
24444 else
24445 type = types[map->type];
24446@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, voi
24447 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
24448 vma->vm_flags & VM_LOCKED ? 'l' : '-',
24449 vma->vm_flags & VM_IO ? 'i' : '-',
24450+#ifdef CONFIG_GRKERNSEC_HIDESYM
24451+ 0);
24452+#else
24453 vma->vm_pgoff);
24454+#endif
24455
24456 #if defined(__i386__)
24457 pgprot = pgprot_val(vma->vm_page_prot);
24458diff -urNp linux-3.0.4/drivers/gpu/drm/drm_ioctl.c linux-3.0.4/drivers/gpu/drm/drm_ioctl.c
24459--- linux-3.0.4/drivers/gpu/drm/drm_ioctl.c 2011-07-21 22:17:23.000000000 -0400
24460+++ linux-3.0.4/drivers/gpu/drm/drm_ioctl.c 2011-08-23 21:47:55.000000000 -0400
24461@@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev,
24462 stats->data[i].value =
24463 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
24464 else
24465- stats->data[i].value = atomic_read(&dev->counts[i]);
24466+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
24467 stats->data[i].type = dev->types[i];
24468 }
24469
24470diff -urNp linux-3.0.4/drivers/gpu/drm/drm_lock.c linux-3.0.4/drivers/gpu/drm/drm_lock.c
24471--- linux-3.0.4/drivers/gpu/drm/drm_lock.c 2011-07-21 22:17:23.000000000 -0400
24472+++ linux-3.0.4/drivers/gpu/drm/drm_lock.c 2011-08-23 21:47:55.000000000 -0400
24473@@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, voi
24474 if (drm_lock_take(&master->lock, lock->context)) {
24475 master->lock.file_priv = file_priv;
24476 master->lock.lock_time = jiffies;
24477- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
24478+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
24479 break; /* Got lock */
24480 }
24481
24482@@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, v
24483 return -EINVAL;
24484 }
24485
24486- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
24487+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
24488
24489 if (drm_lock_free(&master->lock, lock->context)) {
24490 /* FIXME: Should really bail out here. */
24491diff -urNp linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c
24492--- linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c 2011-07-21 22:17:23.000000000 -0400
24493+++ linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c 2011-08-23 21:47:55.000000000 -0400
24494@@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_de
24495 dma->buflist[vertex->idx],
24496 vertex->discard, vertex->used);
24497
24498- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24499- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24500+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24501+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24502 sarea_priv->last_enqueue = dev_priv->counter - 1;
24503 sarea_priv->last_dispatch = (int)hw_status[5];
24504
24505@@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device
24506 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
24507 mc->last_render);
24508
24509- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24510- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24511+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24512+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24513 sarea_priv->last_enqueue = dev_priv->counter - 1;
24514 sarea_priv->last_dispatch = (int)hw_status[5];
24515
24516diff -urNp linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h
24517--- linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h 2011-07-21 22:17:23.000000000 -0400
24518+++ linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h 2011-08-23 21:47:55.000000000 -0400
24519@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
24520 int page_flipping;
24521
24522 wait_queue_head_t irq_queue;
24523- atomic_t irq_received;
24524- atomic_t irq_emitted;
24525+ atomic_unchecked_t irq_received;
24526+ atomic_unchecked_t irq_emitted;
24527
24528 int front_offset;
24529 } drm_i810_private_t;
24530diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c
24531--- linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c 2011-07-21 22:17:23.000000000 -0400
24532+++ linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c 2011-08-23 21:47:55.000000000 -0400
24533@@ -497,7 +497,7 @@ static int i915_interrupt_info(struct se
24534 I915_READ(GTIMR));
24535 }
24536 seq_printf(m, "Interrupts received: %d\n",
24537- atomic_read(&dev_priv->irq_received));
24538+ atomic_read_unchecked(&dev_priv->irq_received));
24539 for (i = 0; i < I915_NUM_RINGS; i++) {
24540 if (IS_GEN6(dev)) {
24541 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
24542diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c
24543--- linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c 2011-09-02 18:11:21.000000000 -0400
24544+++ linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c 2011-08-23 21:47:55.000000000 -0400
24545@@ -1169,7 +1169,7 @@ static bool i915_switcheroo_can_switch(s
24546 bool can_switch;
24547
24548 spin_lock(&dev->count_lock);
24549- can_switch = (dev->open_count == 0);
24550+ can_switch = (local_read(&dev->open_count) == 0);
24551 spin_unlock(&dev->count_lock);
24552 return can_switch;
24553 }
24554diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h
24555--- linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h 2011-07-21 22:17:23.000000000 -0400
24556+++ linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h 2011-08-23 21:47:55.000000000 -0400
24557@@ -219,7 +219,7 @@ struct drm_i915_display_funcs {
24558 /* render clock increase/decrease */
24559 /* display clock increase/decrease */
24560 /* pll clock increase/decrease */
24561-};
24562+} __no_const;
24563
24564 struct intel_device_info {
24565 u8 gen;
24566@@ -300,7 +300,7 @@ typedef struct drm_i915_private {
24567 int current_page;
24568 int page_flipping;
24569
24570- atomic_t irq_received;
24571+ atomic_unchecked_t irq_received;
24572
24573 /* protects the irq masks */
24574 spinlock_t irq_lock;
24575@@ -874,7 +874,7 @@ struct drm_i915_gem_object {
24576 * will be page flipped away on the next vblank. When it
24577 * reaches 0, dev_priv->pending_flip_queue will be woken up.
24578 */
24579- atomic_t pending_flip;
24580+ atomic_unchecked_t pending_flip;
24581 };
24582
24583 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
24584@@ -1247,7 +1247,7 @@ extern int intel_setup_gmbus(struct drm_
24585 extern void intel_teardown_gmbus(struct drm_device *dev);
24586 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
24587 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
24588-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
24589+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
24590 {
24591 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
24592 }
24593diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c
24594--- linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-07-21 22:17:23.000000000 -0400
24595+++ linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-08-23 21:47:55.000000000 -0400
24596@@ -188,7 +188,7 @@ i915_gem_object_set_to_gpu_domain(struct
24597 i915_gem_clflush_object(obj);
24598
24599 if (obj->base.pending_write_domain)
24600- cd->flips |= atomic_read(&obj->pending_flip);
24601+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
24602
24603 /* The actual obj->write_domain will be updated with
24604 * pending_write_domain after we emit the accumulated flush for all
24605diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c
24606--- linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c 2011-09-02 18:11:21.000000000 -0400
24607+++ linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c 2011-08-23 21:47:55.000000000 -0400
24608@@ -473,7 +473,7 @@ static irqreturn_t ivybridge_irq_handler
24609 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
24610 struct drm_i915_master_private *master_priv;
24611
24612- atomic_inc(&dev_priv->irq_received);
24613+ atomic_inc_unchecked(&dev_priv->irq_received);
24614
24615 /* disable master interrupt before clearing iir */
24616 de_ier = I915_READ(DEIER);
24617@@ -563,7 +563,7 @@ static irqreturn_t ironlake_irq_handler(
24618 struct drm_i915_master_private *master_priv;
24619 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
24620
24621- atomic_inc(&dev_priv->irq_received);
24622+ atomic_inc_unchecked(&dev_priv->irq_received);
24623
24624 if (IS_GEN6(dev))
24625 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
24626@@ -1226,7 +1226,7 @@ static irqreturn_t i915_driver_irq_handl
24627 int ret = IRQ_NONE, pipe;
24628 bool blc_event = false;
24629
24630- atomic_inc(&dev_priv->irq_received);
24631+ atomic_inc_unchecked(&dev_priv->irq_received);
24632
24633 iir = I915_READ(IIR);
24634
24635@@ -1735,7 +1735,7 @@ static void ironlake_irq_preinstall(stru
24636 {
24637 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
24638
24639- atomic_set(&dev_priv->irq_received, 0);
24640+ atomic_set_unchecked(&dev_priv->irq_received, 0);
24641
24642 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
24643 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
24644@@ -1899,7 +1899,7 @@ static void i915_driver_irq_preinstall(s
24645 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
24646 int pipe;
24647
24648- atomic_set(&dev_priv->irq_received, 0);
24649+ atomic_set_unchecked(&dev_priv->irq_received, 0);
24650
24651 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
24652 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
24653diff -urNp linux-3.0.4/drivers/gpu/drm/i915/intel_display.c linux-3.0.4/drivers/gpu/drm/i915/intel_display.c
24654--- linux-3.0.4/drivers/gpu/drm/i915/intel_display.c 2011-09-02 18:11:21.000000000 -0400
24655+++ linux-3.0.4/drivers/gpu/drm/i915/intel_display.c 2011-08-23 21:47:55.000000000 -0400
24656@@ -1961,7 +1961,7 @@ intel_pipe_set_base(struct drm_crtc *crt
24657
24658 wait_event(dev_priv->pending_flip_queue,
24659 atomic_read(&dev_priv->mm.wedged) ||
24660- atomic_read(&obj->pending_flip) == 0);
24661+ atomic_read_unchecked(&obj->pending_flip) == 0);
24662
24663 /* Big Hammer, we also need to ensure that any pending
24664 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
24665@@ -2548,7 +2548,7 @@ static void intel_crtc_wait_for_pending_
24666 obj = to_intel_framebuffer(crtc->fb)->obj;
24667 dev_priv = crtc->dev->dev_private;
24668 wait_event(dev_priv->pending_flip_queue,
24669- atomic_read(&obj->pending_flip) == 0);
24670+ atomic_read_unchecked(&obj->pending_flip) == 0);
24671 }
24672
24673 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
24674@@ -6225,7 +6225,7 @@ static void do_intel_finish_page_flip(st
24675
24676 atomic_clear_mask(1 << intel_crtc->plane,
24677 &obj->pending_flip.counter);
24678- if (atomic_read(&obj->pending_flip) == 0)
24679+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
24680 wake_up(&dev_priv->pending_flip_queue);
24681
24682 schedule_work(&work->work);
24683@@ -6514,7 +6514,7 @@ static int intel_crtc_page_flip(struct d
24684 /* Block clients from rendering to the new back buffer until
24685 * the flip occurs and the object is no longer visible.
24686 */
24687- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24688+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24689
24690 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
24691 if (ret)
24692@@ -6527,7 +6527,7 @@ static int intel_crtc_page_flip(struct d
24693 return 0;
24694
24695 cleanup_pending:
24696- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24697+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24698 cleanup_objs:
24699 drm_gem_object_unreference(&work->old_fb_obj->base);
24700 drm_gem_object_unreference(&obj->base);
24701diff -urNp linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h
24702--- linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h 2011-07-21 22:17:23.000000000 -0400
24703+++ linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h 2011-08-23 21:47:55.000000000 -0400
24704@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
24705 u32 clear_cmd;
24706 u32 maccess;
24707
24708- atomic_t vbl_received; /**< Number of vblanks received. */
24709+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
24710 wait_queue_head_t fence_queue;
24711- atomic_t last_fence_retired;
24712+ atomic_unchecked_t last_fence_retired;
24713 u32 next_fence_to_post;
24714
24715 unsigned int fb_cpp;
24716diff -urNp linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c
24717--- linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c 2011-07-21 22:17:23.000000000 -0400
24718+++ linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c 2011-08-23 21:47:55.000000000 -0400
24719@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
24720 if (crtc != 0)
24721 return 0;
24722
24723- return atomic_read(&dev_priv->vbl_received);
24724+ return atomic_read_unchecked(&dev_priv->vbl_received);
24725 }
24726
24727
24728@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
24729 /* VBLANK interrupt */
24730 if (status & MGA_VLINEPEN) {
24731 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
24732- atomic_inc(&dev_priv->vbl_received);
24733+ atomic_inc_unchecked(&dev_priv->vbl_received);
24734 drm_handle_vblank(dev, 0);
24735 handled = 1;
24736 }
24737@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
24738 if ((prim_start & ~0x03) != (prim_end & ~0x03))
24739 MGA_WRITE(MGA_PRIMEND, prim_end);
24740
24741- atomic_inc(&dev_priv->last_fence_retired);
24742+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
24743 DRM_WAKEUP(&dev_priv->fence_queue);
24744 handled = 1;
24745 }
24746@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_dev
24747 * using fences.
24748 */
24749 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
24750- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
24751+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
24752 - *sequence) <= (1 << 23)));
24753
24754 *sequence = cur_fence;
24755diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c
24756--- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-07-21 22:17:23.000000000 -0400
24757+++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-08-26 19:49:56.000000000 -0400
24758@@ -200,7 +200,7 @@ struct methods {
24759 const char desc[8];
24760 void (*loadbios)(struct drm_device *, uint8_t *);
24761 const bool rw;
24762-};
24763+} __do_const;
24764
24765 static struct methods shadow_methods[] = {
24766 { "PRAMIN", load_vbios_pramin, true },
24767@@ -5488,7 +5488,7 @@ parse_bit_displayport_tbl_entry(struct d
24768 struct bit_table {
24769 const char id;
24770 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
24771-};
24772+} __no_const;
24773
24774 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
24775
24776diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h
24777--- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-07-21 22:17:23.000000000 -0400
24778+++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-08-23 21:47:55.000000000 -0400
24779@@ -227,7 +227,7 @@ struct nouveau_channel {
24780 struct list_head pending;
24781 uint32_t sequence;
24782 uint32_t sequence_ack;
24783- atomic_t last_sequence_irq;
24784+ atomic_unchecked_t last_sequence_irq;
24785 } fence;
24786
24787 /* DMA push buffer */
24788@@ -304,7 +304,7 @@ struct nouveau_exec_engine {
24789 u32 handle, u16 class);
24790 void (*set_tile_region)(struct drm_device *dev, int i);
24791 void (*tlb_flush)(struct drm_device *, int engine);
24792-};
24793+} __no_const;
24794
24795 struct nouveau_instmem_engine {
24796 void *priv;
24797@@ -325,13 +325,13 @@ struct nouveau_instmem_engine {
24798 struct nouveau_mc_engine {
24799 int (*init)(struct drm_device *dev);
24800 void (*takedown)(struct drm_device *dev);
24801-};
24802+} __no_const;
24803
24804 struct nouveau_timer_engine {
24805 int (*init)(struct drm_device *dev);
24806 void (*takedown)(struct drm_device *dev);
24807 uint64_t (*read)(struct drm_device *dev);
24808-};
24809+} __no_const;
24810
24811 struct nouveau_fb_engine {
24812 int num_tiles;
24813@@ -494,7 +494,7 @@ struct nouveau_vram_engine {
24814 void (*put)(struct drm_device *, struct nouveau_mem **);
24815
24816 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
24817-};
24818+} __no_const;
24819
24820 struct nouveau_engine {
24821 struct nouveau_instmem_engine instmem;
24822@@ -640,7 +640,7 @@ struct drm_nouveau_private {
24823 struct drm_global_reference mem_global_ref;
24824 struct ttm_bo_global_ref bo_global_ref;
24825 struct ttm_bo_device bdev;
24826- atomic_t validate_sequence;
24827+ atomic_unchecked_t validate_sequence;
24828 } ttm;
24829
24830 struct {
24831diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c
24832--- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-07-21 22:17:23.000000000 -0400
24833+++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-08-23 21:47:55.000000000 -0400
24834@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_chan
24835 if (USE_REFCNT(dev))
24836 sequence = nvchan_rd32(chan, 0x48);
24837 else
24838- sequence = atomic_read(&chan->fence.last_sequence_irq);
24839+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
24840
24841 if (chan->fence.sequence_ack == sequence)
24842 goto out;
24843@@ -544,7 +544,7 @@ nouveau_fence_channel_init(struct nouvea
24844
24845 INIT_LIST_HEAD(&chan->fence.pending);
24846 spin_lock_init(&chan->fence.lock);
24847- atomic_set(&chan->fence.last_sequence_irq, 0);
24848+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
24849 return 0;
24850 }
24851
24852diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c
24853--- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-07-21 22:17:23.000000000 -0400
24854+++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-08-23 21:47:55.000000000 -0400
24855@@ -249,7 +249,7 @@ validate_init(struct nouveau_channel *ch
24856 int trycnt = 0;
24857 int ret, i;
24858
24859- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
24860+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
24861 retry:
24862 if (++trycnt > 100000) {
24863 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
24864diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c
24865--- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c 2011-07-21 22:17:23.000000000 -0400
24866+++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c 2011-08-23 21:47:55.000000000 -0400
24867@@ -488,7 +488,7 @@ static bool nouveau_switcheroo_can_switc
24868 bool can_switch;
24869
24870 spin_lock(&dev->count_lock);
24871- can_switch = (dev->open_count == 0);
24872+ can_switch = (local_read(&dev->open_count) == 0);
24873 spin_unlock(&dev->count_lock);
24874 return can_switch;
24875 }
24876diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c
24877--- linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c 2011-07-21 22:17:23.000000000 -0400
24878+++ linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c 2011-08-23 21:47:55.000000000 -0400
24879@@ -560,7 +560,7 @@ static int
24880 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
24881 u32 class, u32 mthd, u32 data)
24882 {
24883- atomic_set(&chan->fence.last_sequence_irq, data);
24884+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
24885 return 0;
24886 }
24887
24888diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c
24889--- linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c 2011-07-21 22:17:23.000000000 -0400
24890+++ linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c 2011-08-23 21:47:55.000000000 -0400
24891@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
24892
24893 /* GH: Simple idle check.
24894 */
24895- atomic_set(&dev_priv->idle_count, 0);
24896+ atomic_set_unchecked(&dev_priv->idle_count, 0);
24897
24898 /* We don't support anything other than bus-mastering ring mode,
24899 * but the ring can be in either AGP or PCI space for the ring
24900diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h
24901--- linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h 2011-07-21 22:17:23.000000000 -0400
24902+++ linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h 2011-08-23 21:47:55.000000000 -0400
24903@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
24904 int is_pci;
24905 unsigned long cce_buffers_offset;
24906
24907- atomic_t idle_count;
24908+ atomic_unchecked_t idle_count;
24909
24910 int page_flipping;
24911 int current_page;
24912 u32 crtc_offset;
24913 u32 crtc_offset_cntl;
24914
24915- atomic_t vbl_received;
24916+ atomic_unchecked_t vbl_received;
24917
24918 u32 color_fmt;
24919 unsigned int front_offset;
24920diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c
24921--- linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c 2011-07-21 22:17:23.000000000 -0400
24922+++ linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c 2011-08-23 21:47:55.000000000 -0400
24923@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
24924 if (crtc != 0)
24925 return 0;
24926
24927- return atomic_read(&dev_priv->vbl_received);
24928+ return atomic_read_unchecked(&dev_priv->vbl_received);
24929 }
24930
24931 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
24932@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
24933 /* VBLANK interrupt */
24934 if (status & R128_CRTC_VBLANK_INT) {
24935 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
24936- atomic_inc(&dev_priv->vbl_received);
24937+ atomic_inc_unchecked(&dev_priv->vbl_received);
24938 drm_handle_vblank(dev, 0);
24939 return IRQ_HANDLED;
24940 }
24941diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_state.c linux-3.0.4/drivers/gpu/drm/r128/r128_state.c
24942--- linux-3.0.4/drivers/gpu/drm/r128/r128_state.c 2011-07-21 22:17:23.000000000 -0400
24943+++ linux-3.0.4/drivers/gpu/drm/r128/r128_state.c 2011-08-23 21:47:55.000000000 -0400
24944@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_priv
24945
24946 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
24947 {
24948- if (atomic_read(&dev_priv->idle_count) == 0)
24949+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
24950 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
24951 else
24952- atomic_set(&dev_priv->idle_count, 0);
24953+ atomic_set_unchecked(&dev_priv->idle_count, 0);
24954 }
24955
24956 #endif
24957diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/atom.c linux-3.0.4/drivers/gpu/drm/radeon/atom.c
24958--- linux-3.0.4/drivers/gpu/drm/radeon/atom.c 2011-07-21 22:17:23.000000000 -0400
24959+++ linux-3.0.4/drivers/gpu/drm/radeon/atom.c 2011-08-23 21:48:14.000000000 -0400
24960@@ -1245,6 +1245,8 @@ struct atom_context *atom_parse(struct c
24961 char name[512];
24962 int i;
24963
24964+ pax_track_stack();
24965+
24966 ctx->card = card;
24967 ctx->bios = bios;
24968
24969diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c
24970--- linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c 2011-07-21 22:17:23.000000000 -0400
24971+++ linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c 2011-08-23 21:47:55.000000000 -0400
24972@@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
24973 regex_t mask_rex;
24974 regmatch_t match[4];
24975 char buf[1024];
24976- size_t end;
24977+ long end;
24978 int len;
24979 int done = 0;
24980 int r;
24981 unsigned o;
24982 struct offset *offset;
24983 char last_reg_s[10];
24984- int last_reg;
24985+ unsigned long last_reg;
24986
24987 if (regcomp
24988 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
24989diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c
24990--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c 2011-07-21 22:17:23.000000000 -0400
24991+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c 2011-08-23 21:48:14.000000000 -0400
24992@@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from
24993 struct radeon_gpio_rec gpio;
24994 struct radeon_hpd hpd;
24995
24996+ pax_track_stack();
24997+
24998 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
24999 return false;
25000
25001diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c
25002--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c 2011-09-02 18:11:21.000000000 -0400
25003+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c 2011-08-23 21:47:55.000000000 -0400
25004@@ -678,7 +678,7 @@ static bool radeon_switcheroo_can_switch
25005 bool can_switch;
25006
25007 spin_lock(&dev->count_lock);
25008- can_switch = (dev->open_count == 0);
25009+ can_switch = (local_read(&dev->open_count) == 0);
25010 spin_unlock(&dev->count_lock);
25011 return can_switch;
25012 }
25013diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c
25014--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c 2011-09-02 18:11:21.000000000 -0400
25015+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c 2011-08-23 21:48:14.000000000 -0400
25016@@ -946,6 +946,8 @@ void radeon_compute_pll_legacy(struct ra
25017 uint32_t post_div;
25018 u32 pll_out_min, pll_out_max;
25019
25020+ pax_track_stack();
25021+
25022 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
25023 freq = freq * 1000;
25024
25025diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h
25026--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h 2011-07-21 22:17:23.000000000 -0400
25027+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h 2011-08-23 21:47:55.000000000 -0400
25028@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
25029
25030 /* SW interrupt */
25031 wait_queue_head_t swi_queue;
25032- atomic_t swi_emitted;
25033+ atomic_unchecked_t swi_emitted;
25034 int vblank_crtc;
25035 uint32_t irq_enable_reg;
25036 uint32_t r500_disp_irq_reg;
25037diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c
25038--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c 2011-07-21 22:17:23.000000000 -0400
25039+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c 2011-08-23 21:47:55.000000000 -0400
25040@@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_devi
25041 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
25042 return 0;
25043 }
25044- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
25045+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
25046 if (!rdev->cp.ready)
25047 /* FIXME: cp is not running assume everythings is done right
25048 * away
25049@@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct rade
25050 return r;
25051 }
25052 radeon_fence_write(rdev, 0);
25053- atomic_set(&rdev->fence_drv.seq, 0);
25054+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
25055 INIT_LIST_HEAD(&rdev->fence_drv.created);
25056 INIT_LIST_HEAD(&rdev->fence_drv.emited);
25057 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
25058diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon.h linux-3.0.4/drivers/gpu/drm/radeon/radeon.h
25059--- linux-3.0.4/drivers/gpu/drm/radeon/radeon.h 2011-07-21 22:17:23.000000000 -0400
25060+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon.h 2011-08-23 21:47:55.000000000 -0400
25061@@ -191,7 +191,7 @@ extern int sumo_get_temp(struct radeon_d
25062 */
25063 struct radeon_fence_driver {
25064 uint32_t scratch_reg;
25065- atomic_t seq;
25066+ atomic_unchecked_t seq;
25067 uint32_t last_seq;
25068 unsigned long last_jiffies;
25069 unsigned long last_timeout;
25070@@ -960,7 +960,7 @@ struct radeon_asic {
25071 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
25072 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
25073 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
25074-};
25075+} __no_const;
25076
25077 /*
25078 * Asic structures
25079diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c
25080--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-07-21 22:17:23.000000000 -0400
25081+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-08-23 21:47:55.000000000 -0400
25082@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(str
25083 request = compat_alloc_user_space(sizeof(*request));
25084 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
25085 || __put_user(req32.param, &request->param)
25086- || __put_user((void __user *)(unsigned long)req32.value,
25087+ || __put_user((unsigned long)req32.value,
25088 &request->value))
25089 return -EFAULT;
25090
25091diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c
25092--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c 2011-07-21 22:17:23.000000000 -0400
25093+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c 2011-08-23 21:47:55.000000000 -0400
25094@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
25095 unsigned int ret;
25096 RING_LOCALS;
25097
25098- atomic_inc(&dev_priv->swi_emitted);
25099- ret = atomic_read(&dev_priv->swi_emitted);
25100+ atomic_inc_unchecked(&dev_priv->swi_emitted);
25101+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
25102
25103 BEGIN_RING(4);
25104 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
25105@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
25106 drm_radeon_private_t *dev_priv =
25107 (drm_radeon_private_t *) dev->dev_private;
25108
25109- atomic_set(&dev_priv->swi_emitted, 0);
25110+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
25111 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
25112
25113 dev->max_vblank_count = 0x001fffff;
25114diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c
25115--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c 2011-07-21 22:17:23.000000000 -0400
25116+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c 2011-08-23 21:47:55.000000000 -0400
25117@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
25118 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
25119 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
25120
25121- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25122+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25123 sarea_priv->nbox * sizeof(depth_boxes[0])))
25124 return -EFAULT;
25125
25126@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
25127 {
25128 drm_radeon_private_t *dev_priv = dev->dev_private;
25129 drm_radeon_getparam_t *param = data;
25130- int value;
25131+ int value = 0;
25132
25133 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
25134
25135diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c
25136--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c 2011-07-21 22:17:23.000000000 -0400
25137+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c 2011-08-23 21:47:55.000000000 -0400
25138@@ -644,8 +644,10 @@ int radeon_mmap(struct file *filp, struc
25139 }
25140 if (unlikely(ttm_vm_ops == NULL)) {
25141 ttm_vm_ops = vma->vm_ops;
25142- radeon_ttm_vm_ops = *ttm_vm_ops;
25143- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25144+ pax_open_kernel();
25145+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
25146+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25147+ pax_close_kernel();
25148 }
25149 vma->vm_ops = &radeon_ttm_vm_ops;
25150 return 0;
25151diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/rs690.c linux-3.0.4/drivers/gpu/drm/radeon/rs690.c
25152--- linux-3.0.4/drivers/gpu/drm/radeon/rs690.c 2011-07-21 22:17:23.000000000 -0400
25153+++ linux-3.0.4/drivers/gpu/drm/radeon/rs690.c 2011-08-23 21:47:55.000000000 -0400
25154@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct
25155 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
25156 rdev->pm.sideport_bandwidth.full)
25157 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
25158- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
25159+ read_delay_latency.full = dfixed_const(800 * 1000);
25160 read_delay_latency.full = dfixed_div(read_delay_latency,
25161 rdev->pm.igp_sideport_mclk);
25162+ a.full = dfixed_const(370);
25163+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
25164 } else {
25165 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
25166 rdev->pm.k8_bandwidth.full)
25167diff -urNp linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c
25168--- linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-07-21 22:17:23.000000000 -0400
25169+++ linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-08-23 21:47:55.000000000 -0400
25170@@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages
25171 static int ttm_pool_mm_shrink(struct shrinker *shrink,
25172 struct shrink_control *sc)
25173 {
25174- static atomic_t start_pool = ATOMIC_INIT(0);
25175+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
25176 unsigned i;
25177- unsigned pool_offset = atomic_add_return(1, &start_pool);
25178+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
25179 struct ttm_page_pool *pool;
25180 int shrink_pages = sc->nr_to_scan;
25181
25182diff -urNp linux-3.0.4/drivers/gpu/drm/via/via_drv.h linux-3.0.4/drivers/gpu/drm/via/via_drv.h
25183--- linux-3.0.4/drivers/gpu/drm/via/via_drv.h 2011-07-21 22:17:23.000000000 -0400
25184+++ linux-3.0.4/drivers/gpu/drm/via/via_drv.h 2011-08-23 21:47:55.000000000 -0400
25185@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
25186 typedef uint32_t maskarray_t[5];
25187
25188 typedef struct drm_via_irq {
25189- atomic_t irq_received;
25190+ atomic_unchecked_t irq_received;
25191 uint32_t pending_mask;
25192 uint32_t enable_mask;
25193 wait_queue_head_t irq_queue;
25194@@ -75,7 +75,7 @@ typedef struct drm_via_private {
25195 struct timeval last_vblank;
25196 int last_vblank_valid;
25197 unsigned usec_per_vblank;
25198- atomic_t vbl_received;
25199+ atomic_unchecked_t vbl_received;
25200 drm_via_state_t hc_state;
25201 char pci_buf[VIA_PCI_BUF_SIZE];
25202 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
25203diff -urNp linux-3.0.4/drivers/gpu/drm/via/via_irq.c linux-3.0.4/drivers/gpu/drm/via/via_irq.c
25204--- linux-3.0.4/drivers/gpu/drm/via/via_irq.c 2011-07-21 22:17:23.000000000 -0400
25205+++ linux-3.0.4/drivers/gpu/drm/via/via_irq.c 2011-08-23 21:47:55.000000000 -0400
25206@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
25207 if (crtc != 0)
25208 return 0;
25209
25210- return atomic_read(&dev_priv->vbl_received);
25211+ return atomic_read_unchecked(&dev_priv->vbl_received);
25212 }
25213
25214 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
25215@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
25216
25217 status = VIA_READ(VIA_REG_INTERRUPT);
25218 if (status & VIA_IRQ_VBLANK_PENDING) {
25219- atomic_inc(&dev_priv->vbl_received);
25220- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
25221+ atomic_inc_unchecked(&dev_priv->vbl_received);
25222+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
25223 do_gettimeofday(&cur_vblank);
25224 if (dev_priv->last_vblank_valid) {
25225 dev_priv->usec_per_vblank =
25226@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25227 dev_priv->last_vblank = cur_vblank;
25228 dev_priv->last_vblank_valid = 1;
25229 }
25230- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
25231+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
25232 DRM_DEBUG("US per vblank is: %u\n",
25233 dev_priv->usec_per_vblank);
25234 }
25235@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25236
25237 for (i = 0; i < dev_priv->num_irqs; ++i) {
25238 if (status & cur_irq->pending_mask) {
25239- atomic_inc(&cur_irq->irq_received);
25240+ atomic_inc_unchecked(&cur_irq->irq_received);
25241 DRM_WAKEUP(&cur_irq->irq_queue);
25242 handled = 1;
25243 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
25244@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *d
25245 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25246 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
25247 masks[irq][4]));
25248- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
25249+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
25250 } else {
25251 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25252 (((cur_irq_sequence =
25253- atomic_read(&cur_irq->irq_received)) -
25254+ atomic_read_unchecked(&cur_irq->irq_received)) -
25255 *sequence) <= (1 << 23)));
25256 }
25257 *sequence = cur_irq_sequence;
25258@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct dr
25259 }
25260
25261 for (i = 0; i < dev_priv->num_irqs; ++i) {
25262- atomic_set(&cur_irq->irq_received, 0);
25263+ atomic_set_unchecked(&cur_irq->irq_received, 0);
25264 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
25265 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
25266 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
25267@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev,
25268 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
25269 case VIA_IRQ_RELATIVE:
25270 irqwait->request.sequence +=
25271- atomic_read(&cur_irq->irq_received);
25272+ atomic_read_unchecked(&cur_irq->irq_received);
25273 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
25274 case VIA_IRQ_ABSOLUTE:
25275 break;
25276diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
25277--- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-07-21 22:17:23.000000000 -0400
25278+++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-08-23 21:47:55.000000000 -0400
25279@@ -240,7 +240,7 @@ struct vmw_private {
25280 * Fencing and IRQs.
25281 */
25282
25283- atomic_t fence_seq;
25284+ atomic_unchecked_t fence_seq;
25285 wait_queue_head_t fence_queue;
25286 wait_queue_head_t fifo_queue;
25287 atomic_t fence_queue_waiters;
25288diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
25289--- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-07-21 22:17:23.000000000 -0400
25290+++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-08-23 21:47:55.000000000 -0400
25291@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev
25292 while (!vmw_lag_lt(queue, us)) {
25293 spin_lock(&queue->lock);
25294 if (list_empty(&queue->head))
25295- sequence = atomic_read(&dev_priv->fence_seq);
25296+ sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25297 else {
25298 fence = list_first_entry(&queue->head,
25299 struct vmw_fence, head);
25300diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
25301--- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-07-21 22:17:23.000000000 -0400
25302+++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-08-23 21:47:55.000000000 -0400
25303@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de
25304 (unsigned int) min,
25305 (unsigned int) fifo->capabilities);
25306
25307- atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25308+ atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25309 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
25310 vmw_fence_queue_init(&fifo->fence_queue);
25311 return vmw_fifo_send_fence(dev_priv, &dummy);
25312@@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25313
25314 fm = vmw_fifo_reserve(dev_priv, bytes);
25315 if (unlikely(fm == NULL)) {
25316- *sequence = atomic_read(&dev_priv->fence_seq);
25317+ *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25318 ret = -ENOMEM;
25319 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
25320 false, 3*HZ);
25321@@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25322 }
25323
25324 do {
25325- *sequence = atomic_add_return(1, &dev_priv->fence_seq);
25326+ *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
25327 } while (*sequence == 0);
25328
25329 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
25330diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
25331--- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-07-21 22:17:23.000000000 -0400
25332+++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-08-23 21:47:55.000000000 -0400
25333@@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_priva
25334 * emitted. Then the fence is stale and signaled.
25335 */
25336
25337- ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
25338+ ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
25339 > VMW_FENCE_WRAP);
25340
25341 return ret;
25342@@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private
25343
25344 if (fifo_idle)
25345 down_read(&fifo_state->rwsem);
25346- signal_seq = atomic_read(&dev_priv->fence_seq);
25347+ signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
25348 ret = 0;
25349
25350 for (;;) {
25351diff -urNp linux-3.0.4/drivers/hid/hid-core.c linux-3.0.4/drivers/hid/hid-core.c
25352--- linux-3.0.4/drivers/hid/hid-core.c 2011-07-21 22:17:23.000000000 -0400
25353+++ linux-3.0.4/drivers/hid/hid-core.c 2011-08-23 21:47:55.000000000 -0400
25354@@ -1923,7 +1923,7 @@ static bool hid_ignore(struct hid_device
25355
25356 int hid_add_device(struct hid_device *hdev)
25357 {
25358- static atomic_t id = ATOMIC_INIT(0);
25359+ static atomic_unchecked_t id = ATOMIC_INIT(0);
25360 int ret;
25361
25362 if (WARN_ON(hdev->status & HID_STAT_ADDED))
25363@@ -1938,7 +1938,7 @@ int hid_add_device(struct hid_device *hd
25364 /* XXX hack, any other cleaner solution after the driver core
25365 * is converted to allow more than 20 bytes as the device name? */
25366 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
25367- hdev->vendor, hdev->product, atomic_inc_return(&id));
25368+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
25369
25370 hid_debug_register(hdev, dev_name(&hdev->dev));
25371 ret = device_add(&hdev->dev);
25372diff -urNp linux-3.0.4/drivers/hid/usbhid/hiddev.c linux-3.0.4/drivers/hid/usbhid/hiddev.c
25373--- linux-3.0.4/drivers/hid/usbhid/hiddev.c 2011-07-21 22:17:23.000000000 -0400
25374+++ linux-3.0.4/drivers/hid/usbhid/hiddev.c 2011-08-23 21:47:55.000000000 -0400
25375@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *fi
25376 break;
25377
25378 case HIDIOCAPPLICATION:
25379- if (arg < 0 || arg >= hid->maxapplication)
25380+ if (arg >= hid->maxapplication)
25381 break;
25382
25383 for (i = 0; i < hid->maxcollection; i++)
25384diff -urNp linux-3.0.4/drivers/hwmon/acpi_power_meter.c linux-3.0.4/drivers/hwmon/acpi_power_meter.c
25385--- linux-3.0.4/drivers/hwmon/acpi_power_meter.c 2011-07-21 22:17:23.000000000 -0400
25386+++ linux-3.0.4/drivers/hwmon/acpi_power_meter.c 2011-08-23 21:47:55.000000000 -0400
25387@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
25388 return res;
25389
25390 temp /= 1000;
25391- if (temp < 0)
25392- return -EINVAL;
25393
25394 mutex_lock(&resource->lock);
25395 resource->trip[attr->index - 7] = temp;
25396diff -urNp linux-3.0.4/drivers/hwmon/sht15.c linux-3.0.4/drivers/hwmon/sht15.c
25397--- linux-3.0.4/drivers/hwmon/sht15.c 2011-07-21 22:17:23.000000000 -0400
25398+++ linux-3.0.4/drivers/hwmon/sht15.c 2011-08-23 21:47:55.000000000 -0400
25399@@ -166,7 +166,7 @@ struct sht15_data {
25400 int supply_uV;
25401 bool supply_uV_valid;
25402 struct work_struct update_supply_work;
25403- atomic_t interrupt_handled;
25404+ atomic_unchecked_t interrupt_handled;
25405 };
25406
25407 /**
25408@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht1
25409 return ret;
25410
25411 gpio_direction_input(data->pdata->gpio_data);
25412- atomic_set(&data->interrupt_handled, 0);
25413+ atomic_set_unchecked(&data->interrupt_handled, 0);
25414
25415 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25416 if (gpio_get_value(data->pdata->gpio_data) == 0) {
25417 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
25418 /* Only relevant if the interrupt hasn't occurred. */
25419- if (!atomic_read(&data->interrupt_handled))
25420+ if (!atomic_read_unchecked(&data->interrupt_handled))
25421 schedule_work(&data->read_work);
25422 }
25423 ret = wait_event_timeout(data->wait_queue,
25424@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired
25425
25426 /* First disable the interrupt */
25427 disable_irq_nosync(irq);
25428- atomic_inc(&data->interrupt_handled);
25429+ atomic_inc_unchecked(&data->interrupt_handled);
25430 /* Then schedule a reading work struct */
25431 if (data->state != SHT15_READING_NOTHING)
25432 schedule_work(&data->read_work);
25433@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct wo
25434 * If not, then start the interrupt again - care here as could
25435 * have gone low in meantime so verify it hasn't!
25436 */
25437- atomic_set(&data->interrupt_handled, 0);
25438+ atomic_set_unchecked(&data->interrupt_handled, 0);
25439 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25440 /* If still not occurred or another handler has been scheduled */
25441 if (gpio_get_value(data->pdata->gpio_data)
25442- || atomic_read(&data->interrupt_handled))
25443+ || atomic_read_unchecked(&data->interrupt_handled))
25444 return;
25445 }
25446
25447diff -urNp linux-3.0.4/drivers/hwmon/w83791d.c linux-3.0.4/drivers/hwmon/w83791d.c
25448--- linux-3.0.4/drivers/hwmon/w83791d.c 2011-07-21 22:17:23.000000000 -0400
25449+++ linux-3.0.4/drivers/hwmon/w83791d.c 2011-08-23 21:47:55.000000000 -0400
25450@@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_cli
25451 struct i2c_board_info *info);
25452 static int w83791d_remove(struct i2c_client *client);
25453
25454-static int w83791d_read(struct i2c_client *client, u8 register);
25455-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
25456+static int w83791d_read(struct i2c_client *client, u8 reg);
25457+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
25458 static struct w83791d_data *w83791d_update_device(struct device *dev);
25459
25460 #ifdef DEBUG
25461diff -urNp linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c
25462--- linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c 2011-07-21 22:17:23.000000000 -0400
25463+++ linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-23 21:47:55.000000000 -0400
25464@@ -43,7 +43,7 @@
25465 extern struct i2c_adapter amd756_smbus;
25466
25467 static struct i2c_adapter *s4882_adapter;
25468-static struct i2c_algorithm *s4882_algo;
25469+static i2c_algorithm_no_const *s4882_algo;
25470
25471 /* Wrapper access functions for multiplexed SMBus */
25472 static DEFINE_MUTEX(amd756_lock);
25473diff -urNp linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c
25474--- linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-07-21 22:17:23.000000000 -0400
25475+++ linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-23 21:47:55.000000000 -0400
25476@@ -41,7 +41,7 @@
25477 extern struct i2c_adapter *nforce2_smbus;
25478
25479 static struct i2c_adapter *s4985_adapter;
25480-static struct i2c_algorithm *s4985_algo;
25481+static i2c_algorithm_no_const *s4985_algo;
25482
25483 /* Wrapper access functions for multiplexed SMBus */
25484 static DEFINE_MUTEX(nforce2_lock);
25485diff -urNp linux-3.0.4/drivers/i2c/i2c-mux.c linux-3.0.4/drivers/i2c/i2c-mux.c
25486--- linux-3.0.4/drivers/i2c/i2c-mux.c 2011-07-21 22:17:23.000000000 -0400
25487+++ linux-3.0.4/drivers/i2c/i2c-mux.c 2011-08-23 21:47:55.000000000 -0400
25488@@ -28,7 +28,7 @@
25489 /* multiplexer per channel data */
25490 struct i2c_mux_priv {
25491 struct i2c_adapter adap;
25492- struct i2c_algorithm algo;
25493+ i2c_algorithm_no_const algo;
25494
25495 struct i2c_adapter *parent;
25496 void *mux_dev; /* the mux chip/device */
25497diff -urNp linux-3.0.4/drivers/ide/ide-cd.c linux-3.0.4/drivers/ide/ide-cd.c
25498--- linux-3.0.4/drivers/ide/ide-cd.c 2011-07-21 22:17:23.000000000 -0400
25499+++ linux-3.0.4/drivers/ide/ide-cd.c 2011-08-23 21:47:55.000000000 -0400
25500@@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_
25501 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
25502 if ((unsigned long)buf & alignment
25503 || blk_rq_bytes(rq) & q->dma_pad_mask
25504- || object_is_on_stack(buf))
25505+ || object_starts_on_stack(buf))
25506 drive->dma = 0;
25507 }
25508 }
25509diff -urNp linux-3.0.4/drivers/ide/ide-floppy.c linux-3.0.4/drivers/ide/ide-floppy.c
25510--- linux-3.0.4/drivers/ide/ide-floppy.c 2011-07-21 22:17:23.000000000 -0400
25511+++ linux-3.0.4/drivers/ide/ide-floppy.c 2011-08-23 21:48:14.000000000 -0400
25512@@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_d
25513 u8 pc_buf[256], header_len, desc_cnt;
25514 int i, rc = 1, blocks, length;
25515
25516+ pax_track_stack();
25517+
25518 ide_debug_log(IDE_DBG_FUNC, "enter");
25519
25520 drive->bios_cyl = 0;
25521diff -urNp linux-3.0.4/drivers/ide/setup-pci.c linux-3.0.4/drivers/ide/setup-pci.c
25522--- linux-3.0.4/drivers/ide/setup-pci.c 2011-07-21 22:17:23.000000000 -0400
25523+++ linux-3.0.4/drivers/ide/setup-pci.c 2011-08-23 21:48:14.000000000 -0400
25524@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
25525 int ret, i, n_ports = dev2 ? 4 : 2;
25526 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
25527
25528+ pax_track_stack();
25529+
25530 for (i = 0; i < n_ports / 2; i++) {
25531 ret = ide_setup_pci_controller(pdev[i], d, !i);
25532 if (ret < 0)
25533diff -urNp linux-3.0.4/drivers/infiniband/core/cm.c linux-3.0.4/drivers/infiniband/core/cm.c
25534--- linux-3.0.4/drivers/infiniband/core/cm.c 2011-07-21 22:17:23.000000000 -0400
25535+++ linux-3.0.4/drivers/infiniband/core/cm.c 2011-08-23 21:47:55.000000000 -0400
25536@@ -113,7 +113,7 @@ static char const counter_group_names[CM
25537
25538 struct cm_counter_group {
25539 struct kobject obj;
25540- atomic_long_t counter[CM_ATTR_COUNT];
25541+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
25542 };
25543
25544 struct cm_counter_attribute {
25545@@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm
25546 struct ib_mad_send_buf *msg = NULL;
25547 int ret;
25548
25549- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25550+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25551 counter[CM_REQ_COUNTER]);
25552
25553 /* Quick state check to discard duplicate REQs. */
25554@@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm
25555 if (!cm_id_priv)
25556 return;
25557
25558- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25559+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25560 counter[CM_REP_COUNTER]);
25561 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
25562 if (ret)
25563@@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work
25564 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
25565 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
25566 spin_unlock_irq(&cm_id_priv->lock);
25567- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25568+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25569 counter[CM_RTU_COUNTER]);
25570 goto out;
25571 }
25572@@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_wor
25573 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
25574 dreq_msg->local_comm_id);
25575 if (!cm_id_priv) {
25576- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25577+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25578 counter[CM_DREQ_COUNTER]);
25579 cm_issue_drep(work->port, work->mad_recv_wc);
25580 return -EINVAL;
25581@@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_wor
25582 case IB_CM_MRA_REP_RCVD:
25583 break;
25584 case IB_CM_TIMEWAIT:
25585- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25586+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25587 counter[CM_DREQ_COUNTER]);
25588 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25589 goto unlock;
25590@@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_wor
25591 cm_free_msg(msg);
25592 goto deref;
25593 case IB_CM_DREQ_RCVD:
25594- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25595+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25596 counter[CM_DREQ_COUNTER]);
25597 goto unlock;
25598 default:
25599@@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work
25600 ib_modify_mad(cm_id_priv->av.port->mad_agent,
25601 cm_id_priv->msg, timeout)) {
25602 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
25603- atomic_long_inc(&work->port->
25604+ atomic_long_inc_unchecked(&work->port->
25605 counter_group[CM_RECV_DUPLICATES].
25606 counter[CM_MRA_COUNTER]);
25607 goto out;
25608@@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work
25609 break;
25610 case IB_CM_MRA_REQ_RCVD:
25611 case IB_CM_MRA_REP_RCVD:
25612- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25613+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25614 counter[CM_MRA_COUNTER]);
25615 /* fall through */
25616 default:
25617@@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work
25618 case IB_CM_LAP_IDLE:
25619 break;
25620 case IB_CM_MRA_LAP_SENT:
25621- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25622+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25623 counter[CM_LAP_COUNTER]);
25624 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25625 goto unlock;
25626@@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work
25627 cm_free_msg(msg);
25628 goto deref;
25629 case IB_CM_LAP_RCVD:
25630- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25631+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25632 counter[CM_LAP_COUNTER]);
25633 goto unlock;
25634 default:
25635@@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm
25636 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
25637 if (cur_cm_id_priv) {
25638 spin_unlock_irq(&cm.lock);
25639- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25640+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25641 counter[CM_SIDR_REQ_COUNTER]);
25642 goto out; /* Duplicate message. */
25643 }
25644@@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_ma
25645 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
25646 msg->retries = 1;
25647
25648- atomic_long_add(1 + msg->retries,
25649+ atomic_long_add_unchecked(1 + msg->retries,
25650 &port->counter_group[CM_XMIT].counter[attr_index]);
25651 if (msg->retries)
25652- atomic_long_add(msg->retries,
25653+ atomic_long_add_unchecked(msg->retries,
25654 &port->counter_group[CM_XMIT_RETRIES].
25655 counter[attr_index]);
25656
25657@@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_ma
25658 }
25659
25660 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
25661- atomic_long_inc(&port->counter_group[CM_RECV].
25662+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
25663 counter[attr_id - CM_ATTR_ID_OFFSET]);
25664
25665 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
25666@@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct ko
25667 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
25668
25669 return sprintf(buf, "%ld\n",
25670- atomic_long_read(&group->counter[cm_attr->index]));
25671+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
25672 }
25673
25674 static const struct sysfs_ops cm_counter_ops = {
25675diff -urNp linux-3.0.4/drivers/infiniband/core/fmr_pool.c linux-3.0.4/drivers/infiniband/core/fmr_pool.c
25676--- linux-3.0.4/drivers/infiniband/core/fmr_pool.c 2011-07-21 22:17:23.000000000 -0400
25677+++ linux-3.0.4/drivers/infiniband/core/fmr_pool.c 2011-08-23 21:47:55.000000000 -0400
25678@@ -97,8 +97,8 @@ struct ib_fmr_pool {
25679
25680 struct task_struct *thread;
25681
25682- atomic_t req_ser;
25683- atomic_t flush_ser;
25684+ atomic_unchecked_t req_ser;
25685+ atomic_unchecked_t flush_ser;
25686
25687 wait_queue_head_t force_wait;
25688 };
25689@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
25690 struct ib_fmr_pool *pool = pool_ptr;
25691
25692 do {
25693- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
25694+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
25695 ib_fmr_batch_release(pool);
25696
25697- atomic_inc(&pool->flush_ser);
25698+ atomic_inc_unchecked(&pool->flush_ser);
25699 wake_up_interruptible(&pool->force_wait);
25700
25701 if (pool->flush_function)
25702@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
25703 }
25704
25705 set_current_state(TASK_INTERRUPTIBLE);
25706- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
25707+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
25708 !kthread_should_stop())
25709 schedule();
25710 __set_current_state(TASK_RUNNING);
25711@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
25712 pool->dirty_watermark = params->dirty_watermark;
25713 pool->dirty_len = 0;
25714 spin_lock_init(&pool->pool_lock);
25715- atomic_set(&pool->req_ser, 0);
25716- atomic_set(&pool->flush_ser, 0);
25717+ atomic_set_unchecked(&pool->req_ser, 0);
25718+ atomic_set_unchecked(&pool->flush_ser, 0);
25719 init_waitqueue_head(&pool->force_wait);
25720
25721 pool->thread = kthread_run(ib_fmr_cleanup_thread,
25722@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
25723 }
25724 spin_unlock_irq(&pool->pool_lock);
25725
25726- serial = atomic_inc_return(&pool->req_ser);
25727+ serial = atomic_inc_return_unchecked(&pool->req_ser);
25728 wake_up_process(pool->thread);
25729
25730 if (wait_event_interruptible(pool->force_wait,
25731- atomic_read(&pool->flush_ser) - serial >= 0))
25732+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
25733 return -EINTR;
25734
25735 return 0;
25736@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
25737 } else {
25738 list_add_tail(&fmr->list, &pool->dirty_list);
25739 if (++pool->dirty_len >= pool->dirty_watermark) {
25740- atomic_inc(&pool->req_ser);
25741+ atomic_inc_unchecked(&pool->req_ser);
25742 wake_up_process(pool->thread);
25743 }
25744 }
25745diff -urNp linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c
25746--- linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c 2011-07-21 22:17:23.000000000 -0400
25747+++ linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c 2011-08-23 21:47:55.000000000 -0400
25748@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_r
25749 int err;
25750 struct fw_ri_tpte tpt;
25751 u32 stag_idx;
25752- static atomic_t key;
25753+ static atomic_unchecked_t key;
25754
25755 if (c4iw_fatal_error(rdev))
25756 return -EIO;
25757@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_r
25758 &rdev->resource.tpt_fifo_lock);
25759 if (!stag_idx)
25760 return -ENOMEM;
25761- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
25762+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
25763 }
25764 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
25765 __func__, stag_state, type, pdid, stag_idx);
25766diff -urNp linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c
25767--- linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c 2011-07-21 22:17:23.000000000 -0400
25768+++ linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c 2011-08-23 21:48:14.000000000 -0400
25769@@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(stru
25770 struct infinipath_counters counters;
25771 struct ipath_devdata *dd;
25772
25773+ pax_track_stack();
25774+
25775 dd = file->f_path.dentry->d_inode->i_private;
25776 dd->ipath_f_read_counters(dd, &counters);
25777
25778diff -urNp linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c
25779--- linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c 2011-07-21 22:17:23.000000000 -0400
25780+++ linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c 2011-08-23 21:47:55.000000000 -0400
25781@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
25782 struct ib_atomic_eth *ateth;
25783 struct ipath_ack_entry *e;
25784 u64 vaddr;
25785- atomic64_t *maddr;
25786+ atomic64_unchecked_t *maddr;
25787 u64 sdata;
25788 u32 rkey;
25789 u8 next;
25790@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de
25791 IB_ACCESS_REMOTE_ATOMIC)))
25792 goto nack_acc_unlck;
25793 /* Perform atomic OP and save result. */
25794- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
25795+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
25796 sdata = be64_to_cpu(ateth->swap_data);
25797 e = &qp->s_ack_queue[qp->r_head_ack_queue];
25798 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
25799- (u64) atomic64_add_return(sdata, maddr) - sdata :
25800+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
25801 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
25802 be64_to_cpu(ateth->compare_data),
25803 sdata);
25804diff -urNp linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c
25805--- linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-07-21 22:17:23.000000000 -0400
25806+++ linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-08-23 21:47:55.000000000 -0400
25807@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip
25808 unsigned long flags;
25809 struct ib_wc wc;
25810 u64 sdata;
25811- atomic64_t *maddr;
25812+ atomic64_unchecked_t *maddr;
25813 enum ib_wc_status send_status;
25814
25815 /*
25816@@ -382,11 +382,11 @@ again:
25817 IB_ACCESS_REMOTE_ATOMIC)))
25818 goto acc_err;
25819 /* Perform atomic OP and save result. */
25820- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
25821+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
25822 sdata = wqe->wr.wr.atomic.compare_add;
25823 *(u64 *) sqp->s_sge.sge.vaddr =
25824 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
25825- (u64) atomic64_add_return(sdata, maddr) - sdata :
25826+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
25827 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
25828 sdata, wqe->wr.wr.atomic.swap);
25829 goto send_comp;
25830diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes.c linux-3.0.4/drivers/infiniband/hw/nes/nes.c
25831--- linux-3.0.4/drivers/infiniband/hw/nes/nes.c 2011-07-21 22:17:23.000000000 -0400
25832+++ linux-3.0.4/drivers/infiniband/hw/nes/nes.c 2011-08-23 21:47:55.000000000 -0400
25833@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
25834 LIST_HEAD(nes_adapter_list);
25835 static LIST_HEAD(nes_dev_list);
25836
25837-atomic_t qps_destroyed;
25838+atomic_unchecked_t qps_destroyed;
25839
25840 static unsigned int ee_flsh_adapter;
25841 static unsigned int sysfs_nonidx_addr;
25842@@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(str
25843 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
25844 struct nes_adapter *nesadapter = nesdev->nesadapter;
25845
25846- atomic_inc(&qps_destroyed);
25847+ atomic_inc_unchecked(&qps_destroyed);
25848
25849 /* Free the control structures */
25850
25851diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c
25852--- linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c 2011-07-21 22:17:23.000000000 -0400
25853+++ linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c 2011-08-23 21:47:55.000000000 -0400
25854@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
25855 u32 cm_packets_retrans;
25856 u32 cm_packets_created;
25857 u32 cm_packets_received;
25858-atomic_t cm_listens_created;
25859-atomic_t cm_listens_destroyed;
25860+atomic_unchecked_t cm_listens_created;
25861+atomic_unchecked_t cm_listens_destroyed;
25862 u32 cm_backlog_drops;
25863-atomic_t cm_loopbacks;
25864-atomic_t cm_nodes_created;
25865-atomic_t cm_nodes_destroyed;
25866-atomic_t cm_accel_dropped_pkts;
25867-atomic_t cm_resets_recvd;
25868+atomic_unchecked_t cm_loopbacks;
25869+atomic_unchecked_t cm_nodes_created;
25870+atomic_unchecked_t cm_nodes_destroyed;
25871+atomic_unchecked_t cm_accel_dropped_pkts;
25872+atomic_unchecked_t cm_resets_recvd;
25873
25874 static inline int mini_cm_accelerated(struct nes_cm_core *,
25875 struct nes_cm_node *);
25876@@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
25877
25878 static struct nes_cm_core *g_cm_core;
25879
25880-atomic_t cm_connects;
25881-atomic_t cm_accepts;
25882-atomic_t cm_disconnects;
25883-atomic_t cm_closes;
25884-atomic_t cm_connecteds;
25885-atomic_t cm_connect_reqs;
25886-atomic_t cm_rejects;
25887+atomic_unchecked_t cm_connects;
25888+atomic_unchecked_t cm_accepts;
25889+atomic_unchecked_t cm_disconnects;
25890+atomic_unchecked_t cm_closes;
25891+atomic_unchecked_t cm_connecteds;
25892+atomic_unchecked_t cm_connect_reqs;
25893+atomic_unchecked_t cm_rejects;
25894
25895
25896 /**
25897@@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(str
25898 kfree(listener);
25899 listener = NULL;
25900 ret = 0;
25901- atomic_inc(&cm_listens_destroyed);
25902+ atomic_inc_unchecked(&cm_listens_destroyed);
25903 } else {
25904 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
25905 }
25906@@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node(
25907 cm_node->rem_mac);
25908
25909 add_hte_node(cm_core, cm_node);
25910- atomic_inc(&cm_nodes_created);
25911+ atomic_inc_unchecked(&cm_nodes_created);
25912
25913 return cm_node;
25914 }
25915@@ -1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm
25916 }
25917
25918 atomic_dec(&cm_core->node_cnt);
25919- atomic_inc(&cm_nodes_destroyed);
25920+ atomic_inc_unchecked(&cm_nodes_destroyed);
25921 nesqp = cm_node->nesqp;
25922 if (nesqp) {
25923 nesqp->cm_node = NULL;
25924@@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm
25925
25926 static void drop_packet(struct sk_buff *skb)
25927 {
25928- atomic_inc(&cm_accel_dropped_pkts);
25929+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
25930 dev_kfree_skb_any(skb);
25931 }
25932
25933@@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm
25934 {
25935
25936 int reset = 0; /* whether to send reset in case of err.. */
25937- atomic_inc(&cm_resets_recvd);
25938+ atomic_inc_unchecked(&cm_resets_recvd);
25939 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
25940 " refcnt=%d\n", cm_node, cm_node->state,
25941 atomic_read(&cm_node->ref_count));
25942@@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_conne
25943 rem_ref_cm_node(cm_node->cm_core, cm_node);
25944 return NULL;
25945 }
25946- atomic_inc(&cm_loopbacks);
25947+ atomic_inc_unchecked(&cm_loopbacks);
25948 loopbackremotenode->loopbackpartner = cm_node;
25949 loopbackremotenode->tcp_cntxt.rcv_wscale =
25950 NES_CM_DEFAULT_RCV_WND_SCALE;
25951@@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_c
25952 add_ref_cm_node(cm_node);
25953 } else if (cm_node->state == NES_CM_STATE_TSA) {
25954 rem_ref_cm_node(cm_core, cm_node);
25955- atomic_inc(&cm_accel_dropped_pkts);
25956+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
25957 dev_kfree_skb_any(skb);
25958 break;
25959 }
25960@@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct ne
25961
25962 if ((cm_id) && (cm_id->event_handler)) {
25963 if (issue_disconn) {
25964- atomic_inc(&cm_disconnects);
25965+ atomic_inc_unchecked(&cm_disconnects);
25966 cm_event.event = IW_CM_EVENT_DISCONNECT;
25967 cm_event.status = disconn_status;
25968 cm_event.local_addr = cm_id->local_addr;
25969@@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct ne
25970 }
25971
25972 if (issue_close) {
25973- atomic_inc(&cm_closes);
25974+ atomic_inc_unchecked(&cm_closes);
25975 nes_disconnect(nesqp, 1);
25976
25977 cm_id->provider_data = nesqp;
25978@@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
25979
25980 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
25981 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
25982- atomic_inc(&cm_accepts);
25983+ atomic_inc_unchecked(&cm_accepts);
25984
25985 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
25986 netdev_refcnt_read(nesvnic->netdev));
25987@@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
25988
25989 struct nes_cm_core *cm_core;
25990
25991- atomic_inc(&cm_rejects);
25992+ atomic_inc_unchecked(&cm_rejects);
25993 cm_node = (struct nes_cm_node *) cm_id->provider_data;
25994 loopback = cm_node->loopbackpartner;
25995 cm_core = cm_node->cm_core;
25996@@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id,
25997 ntohl(cm_id->local_addr.sin_addr.s_addr),
25998 ntohs(cm_id->local_addr.sin_port));
25999
26000- atomic_inc(&cm_connects);
26001+ atomic_inc_unchecked(&cm_connects);
26002 nesqp->active_conn = 1;
26003
26004 /* cache the cm_id in the qp */
26005@@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *c
26006 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
26007 return err;
26008 }
26009- atomic_inc(&cm_listens_created);
26010+ atomic_inc_unchecked(&cm_listens_created);
26011 }
26012
26013 cm_id->add_ref(cm_id);
26014@@ -3278,7 +3278,7 @@ static void cm_event_connected(struct ne
26015 if (nesqp->destroyed) {
26016 return;
26017 }
26018- atomic_inc(&cm_connecteds);
26019+ atomic_inc_unchecked(&cm_connecteds);
26020 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
26021 " local port 0x%04X. jiffies = %lu.\n",
26022 nesqp->hwqp.qp_id,
26023@@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm
26024
26025 cm_id->add_ref(cm_id);
26026 ret = cm_id->event_handler(cm_id, &cm_event);
26027- atomic_inc(&cm_closes);
26028+ atomic_inc_unchecked(&cm_closes);
26029 cm_event.event = IW_CM_EVENT_CLOSE;
26030 cm_event.status = 0;
26031 cm_event.provider_data = cm_id->provider_data;
26032@@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_
26033 return;
26034 cm_id = cm_node->cm_id;
26035
26036- atomic_inc(&cm_connect_reqs);
26037+ atomic_inc_unchecked(&cm_connect_reqs);
26038 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26039 cm_node, cm_id, jiffies);
26040
26041@@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct n
26042 return;
26043 cm_id = cm_node->cm_id;
26044
26045- atomic_inc(&cm_connect_reqs);
26046+ atomic_inc_unchecked(&cm_connect_reqs);
26047 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26048 cm_node, cm_id, jiffies);
26049
26050diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes.h linux-3.0.4/drivers/infiniband/hw/nes/nes.h
26051--- linux-3.0.4/drivers/infiniband/hw/nes/nes.h 2011-07-21 22:17:23.000000000 -0400
26052+++ linux-3.0.4/drivers/infiniband/hw/nes/nes.h 2011-08-23 21:47:55.000000000 -0400
26053@@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
26054 extern unsigned int wqm_quanta;
26055 extern struct list_head nes_adapter_list;
26056
26057-extern atomic_t cm_connects;
26058-extern atomic_t cm_accepts;
26059-extern atomic_t cm_disconnects;
26060-extern atomic_t cm_closes;
26061-extern atomic_t cm_connecteds;
26062-extern atomic_t cm_connect_reqs;
26063-extern atomic_t cm_rejects;
26064-extern atomic_t mod_qp_timouts;
26065-extern atomic_t qps_created;
26066-extern atomic_t qps_destroyed;
26067-extern atomic_t sw_qps_destroyed;
26068+extern atomic_unchecked_t cm_connects;
26069+extern atomic_unchecked_t cm_accepts;
26070+extern atomic_unchecked_t cm_disconnects;
26071+extern atomic_unchecked_t cm_closes;
26072+extern atomic_unchecked_t cm_connecteds;
26073+extern atomic_unchecked_t cm_connect_reqs;
26074+extern atomic_unchecked_t cm_rejects;
26075+extern atomic_unchecked_t mod_qp_timouts;
26076+extern atomic_unchecked_t qps_created;
26077+extern atomic_unchecked_t qps_destroyed;
26078+extern atomic_unchecked_t sw_qps_destroyed;
26079 extern u32 mh_detected;
26080 extern u32 mh_pauses_sent;
26081 extern u32 cm_packets_sent;
26082@@ -194,14 +194,14 @@ extern u32 cm_packets_created;
26083 extern u32 cm_packets_received;
26084 extern u32 cm_packets_dropped;
26085 extern u32 cm_packets_retrans;
26086-extern atomic_t cm_listens_created;
26087-extern atomic_t cm_listens_destroyed;
26088+extern atomic_unchecked_t cm_listens_created;
26089+extern atomic_unchecked_t cm_listens_destroyed;
26090 extern u32 cm_backlog_drops;
26091-extern atomic_t cm_loopbacks;
26092-extern atomic_t cm_nodes_created;
26093-extern atomic_t cm_nodes_destroyed;
26094-extern atomic_t cm_accel_dropped_pkts;
26095-extern atomic_t cm_resets_recvd;
26096+extern atomic_unchecked_t cm_loopbacks;
26097+extern atomic_unchecked_t cm_nodes_created;
26098+extern atomic_unchecked_t cm_nodes_destroyed;
26099+extern atomic_unchecked_t cm_accel_dropped_pkts;
26100+extern atomic_unchecked_t cm_resets_recvd;
26101
26102 extern u32 int_mod_timer_init;
26103 extern u32 int_mod_cq_depth_256;
26104diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c
26105--- linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c 2011-07-21 22:17:23.000000000 -0400
26106+++ linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c 2011-08-23 21:47:55.000000000 -0400
26107@@ -1274,31 +1274,31 @@ static void nes_netdev_get_ethtool_stats
26108 target_stat_values[++index] = mh_detected;
26109 target_stat_values[++index] = mh_pauses_sent;
26110 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
26111- target_stat_values[++index] = atomic_read(&cm_connects);
26112- target_stat_values[++index] = atomic_read(&cm_accepts);
26113- target_stat_values[++index] = atomic_read(&cm_disconnects);
26114- target_stat_values[++index] = atomic_read(&cm_connecteds);
26115- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
26116- target_stat_values[++index] = atomic_read(&cm_rejects);
26117- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
26118- target_stat_values[++index] = atomic_read(&qps_created);
26119- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
26120- target_stat_values[++index] = atomic_read(&qps_destroyed);
26121- target_stat_values[++index] = atomic_read(&cm_closes);
26122+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
26123+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
26124+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
26125+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
26126+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
26127+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
26128+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
26129+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
26130+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
26131+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
26132+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
26133 target_stat_values[++index] = cm_packets_sent;
26134 target_stat_values[++index] = cm_packets_bounced;
26135 target_stat_values[++index] = cm_packets_created;
26136 target_stat_values[++index] = cm_packets_received;
26137 target_stat_values[++index] = cm_packets_dropped;
26138 target_stat_values[++index] = cm_packets_retrans;
26139- target_stat_values[++index] = atomic_read(&cm_listens_created);
26140- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
26141+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
26142+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
26143 target_stat_values[++index] = cm_backlog_drops;
26144- target_stat_values[++index] = atomic_read(&cm_loopbacks);
26145- target_stat_values[++index] = atomic_read(&cm_nodes_created);
26146- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
26147- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
26148- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
26149+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
26150+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
26151+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
26152+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
26153+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
26154 target_stat_values[++index] = nesadapter->free_4kpbl;
26155 target_stat_values[++index] = nesadapter->free_256pbl;
26156 target_stat_values[++index] = int_mod_timer_init;
26157diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c
26158--- linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c 2011-07-21 22:17:23.000000000 -0400
26159+++ linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c 2011-08-23 21:47:55.000000000 -0400
26160@@ -46,9 +46,9 @@
26161
26162 #include <rdma/ib_umem.h>
26163
26164-atomic_t mod_qp_timouts;
26165-atomic_t qps_created;
26166-atomic_t sw_qps_destroyed;
26167+atomic_unchecked_t mod_qp_timouts;
26168+atomic_unchecked_t qps_created;
26169+atomic_unchecked_t sw_qps_destroyed;
26170
26171 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
26172
26173@@ -1141,7 +1141,7 @@ static struct ib_qp *nes_create_qp(struc
26174 if (init_attr->create_flags)
26175 return ERR_PTR(-EINVAL);
26176
26177- atomic_inc(&qps_created);
26178+ atomic_inc_unchecked(&qps_created);
26179 switch (init_attr->qp_type) {
26180 case IB_QPT_RC:
26181 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
26182@@ -1470,7 +1470,7 @@ static int nes_destroy_qp(struct ib_qp *
26183 struct iw_cm_event cm_event;
26184 int ret;
26185
26186- atomic_inc(&sw_qps_destroyed);
26187+ atomic_inc_unchecked(&sw_qps_destroyed);
26188 nesqp->destroyed = 1;
26189
26190 /* Blow away the connection if it exists. */
26191diff -urNp linux-3.0.4/drivers/infiniband/hw/qib/qib.h linux-3.0.4/drivers/infiniband/hw/qib/qib.h
26192--- linux-3.0.4/drivers/infiniband/hw/qib/qib.h 2011-07-21 22:17:23.000000000 -0400
26193+++ linux-3.0.4/drivers/infiniband/hw/qib/qib.h 2011-08-23 21:47:55.000000000 -0400
26194@@ -51,6 +51,7 @@
26195 #include <linux/completion.h>
26196 #include <linux/kref.h>
26197 #include <linux/sched.h>
26198+#include <linux/slab.h>
26199
26200 #include "qib_common.h"
26201 #include "qib_verbs.h"
26202diff -urNp linux-3.0.4/drivers/input/gameport/gameport.c linux-3.0.4/drivers/input/gameport/gameport.c
26203--- linux-3.0.4/drivers/input/gameport/gameport.c 2011-07-21 22:17:23.000000000 -0400
26204+++ linux-3.0.4/drivers/input/gameport/gameport.c 2011-08-23 21:47:55.000000000 -0400
26205@@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
26206 */
26207 static void gameport_init_port(struct gameport *gameport)
26208 {
26209- static atomic_t gameport_no = ATOMIC_INIT(0);
26210+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
26211
26212 __module_get(THIS_MODULE);
26213
26214 mutex_init(&gameport->drv_mutex);
26215 device_initialize(&gameport->dev);
26216 dev_set_name(&gameport->dev, "gameport%lu",
26217- (unsigned long)atomic_inc_return(&gameport_no) - 1);
26218+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
26219 gameport->dev.bus = &gameport_bus;
26220 gameport->dev.release = gameport_release_port;
26221 if (gameport->parent)
26222diff -urNp linux-3.0.4/drivers/input/input.c linux-3.0.4/drivers/input/input.c
26223--- linux-3.0.4/drivers/input/input.c 2011-07-21 22:17:23.000000000 -0400
26224+++ linux-3.0.4/drivers/input/input.c 2011-08-23 21:47:55.000000000 -0400
26225@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struc
26226 */
26227 int input_register_device(struct input_dev *dev)
26228 {
26229- static atomic_t input_no = ATOMIC_INIT(0);
26230+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
26231 struct input_handler *handler;
26232 const char *path;
26233 int error;
26234@@ -1851,7 +1851,7 @@ int input_register_device(struct input_d
26235 dev->setkeycode = input_default_setkeycode;
26236
26237 dev_set_name(&dev->dev, "input%ld",
26238- (unsigned long) atomic_inc_return(&input_no) - 1);
26239+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
26240
26241 error = device_add(&dev->dev);
26242 if (error)
26243diff -urNp linux-3.0.4/drivers/input/joystick/sidewinder.c linux-3.0.4/drivers/input/joystick/sidewinder.c
26244--- linux-3.0.4/drivers/input/joystick/sidewinder.c 2011-07-21 22:17:23.000000000 -0400
26245+++ linux-3.0.4/drivers/input/joystick/sidewinder.c 2011-08-23 21:48:14.000000000 -0400
26246@@ -30,6 +30,7 @@
26247 #include <linux/kernel.h>
26248 #include <linux/module.h>
26249 #include <linux/slab.h>
26250+#include <linux/sched.h>
26251 #include <linux/init.h>
26252 #include <linux/input.h>
26253 #include <linux/gameport.h>
26254@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
26255 unsigned char buf[SW_LENGTH];
26256 int i;
26257
26258+ pax_track_stack();
26259+
26260 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
26261
26262 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
26263diff -urNp linux-3.0.4/drivers/input/joystick/xpad.c linux-3.0.4/drivers/input/joystick/xpad.c
26264--- linux-3.0.4/drivers/input/joystick/xpad.c 2011-07-21 22:17:23.000000000 -0400
26265+++ linux-3.0.4/drivers/input/joystick/xpad.c 2011-08-23 21:47:55.000000000 -0400
26266@@ -689,7 +689,7 @@ static void xpad_led_set(struct led_clas
26267
26268 static int xpad_led_probe(struct usb_xpad *xpad)
26269 {
26270- static atomic_t led_seq = ATOMIC_INIT(0);
26271+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
26272 long led_no;
26273 struct xpad_led *led;
26274 struct led_classdev *led_cdev;
26275@@ -702,7 +702,7 @@ static int xpad_led_probe(struct usb_xpa
26276 if (!led)
26277 return -ENOMEM;
26278
26279- led_no = (long)atomic_inc_return(&led_seq) - 1;
26280+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
26281
26282 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
26283 led->xpad = xpad;
26284diff -urNp linux-3.0.4/drivers/input/mousedev.c linux-3.0.4/drivers/input/mousedev.c
26285--- linux-3.0.4/drivers/input/mousedev.c 2011-07-21 22:17:23.000000000 -0400
26286+++ linux-3.0.4/drivers/input/mousedev.c 2011-08-23 21:47:55.000000000 -0400
26287@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file
26288
26289 spin_unlock_irq(&client->packet_lock);
26290
26291- if (copy_to_user(buffer, data, count))
26292+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
26293 return -EFAULT;
26294
26295 return count;
26296diff -urNp linux-3.0.4/drivers/input/serio/serio.c linux-3.0.4/drivers/input/serio/serio.c
26297--- linux-3.0.4/drivers/input/serio/serio.c 2011-07-21 22:17:23.000000000 -0400
26298+++ linux-3.0.4/drivers/input/serio/serio.c 2011-08-23 21:47:55.000000000 -0400
26299@@ -497,7 +497,7 @@ static void serio_release_port(struct de
26300 */
26301 static void serio_init_port(struct serio *serio)
26302 {
26303- static atomic_t serio_no = ATOMIC_INIT(0);
26304+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
26305
26306 __module_get(THIS_MODULE);
26307
26308@@ -508,7 +508,7 @@ static void serio_init_port(struct serio
26309 mutex_init(&serio->drv_mutex);
26310 device_initialize(&serio->dev);
26311 dev_set_name(&serio->dev, "serio%ld",
26312- (long)atomic_inc_return(&serio_no) - 1);
26313+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
26314 serio->dev.bus = &serio_bus;
26315 serio->dev.release = serio_release_port;
26316 serio->dev.groups = serio_device_attr_groups;
26317diff -urNp linux-3.0.4/drivers/isdn/capi/capi.c linux-3.0.4/drivers/isdn/capi/capi.c
26318--- linux-3.0.4/drivers/isdn/capi/capi.c 2011-07-21 22:17:23.000000000 -0400
26319+++ linux-3.0.4/drivers/isdn/capi/capi.c 2011-08-23 21:47:55.000000000 -0400
26320@@ -83,8 +83,8 @@ struct capiminor {
26321
26322 struct capi20_appl *ap;
26323 u32 ncci;
26324- atomic_t datahandle;
26325- atomic_t msgid;
26326+ atomic_unchecked_t datahandle;
26327+ atomic_unchecked_t msgid;
26328
26329 struct tty_port port;
26330 int ttyinstop;
26331@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *m
26332 capimsg_setu16(s, 2, mp->ap->applid);
26333 capimsg_setu8 (s, 4, CAPI_DATA_B3);
26334 capimsg_setu8 (s, 5, CAPI_RESP);
26335- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
26336+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
26337 capimsg_setu32(s, 8, mp->ncci);
26338 capimsg_setu16(s, 12, datahandle);
26339 }
26340@@ -518,14 +518,14 @@ static void handle_minor_send(struct cap
26341 mp->outbytes -= len;
26342 spin_unlock_bh(&mp->outlock);
26343
26344- datahandle = atomic_inc_return(&mp->datahandle);
26345+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
26346 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
26347 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26348 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26349 capimsg_setu16(skb->data, 2, mp->ap->applid);
26350 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
26351 capimsg_setu8 (skb->data, 5, CAPI_REQ);
26352- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
26353+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
26354 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
26355 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
26356 capimsg_setu16(skb->data, 16, len); /* Data length */
26357diff -urNp linux-3.0.4/drivers/isdn/gigaset/common.c linux-3.0.4/drivers/isdn/gigaset/common.c
26358--- linux-3.0.4/drivers/isdn/gigaset/common.c 2011-07-21 22:17:23.000000000 -0400
26359+++ linux-3.0.4/drivers/isdn/gigaset/common.c 2011-08-23 21:47:55.000000000 -0400
26360@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct
26361 cs->commands_pending = 0;
26362 cs->cur_at_seq = 0;
26363 cs->gotfwver = -1;
26364- cs->open_count = 0;
26365+ local_set(&cs->open_count, 0);
26366 cs->dev = NULL;
26367 cs->tty = NULL;
26368 cs->tty_dev = NULL;
26369diff -urNp linux-3.0.4/drivers/isdn/gigaset/gigaset.h linux-3.0.4/drivers/isdn/gigaset/gigaset.h
26370--- linux-3.0.4/drivers/isdn/gigaset/gigaset.h 2011-07-21 22:17:23.000000000 -0400
26371+++ linux-3.0.4/drivers/isdn/gigaset/gigaset.h 2011-08-23 21:47:55.000000000 -0400
26372@@ -35,6 +35,7 @@
26373 #include <linux/tty_driver.h>
26374 #include <linux/list.h>
26375 #include <asm/atomic.h>
26376+#include <asm/local.h>
26377
26378 #define GIG_VERSION {0, 5, 0, 0}
26379 #define GIG_COMPAT {0, 4, 0, 0}
26380@@ -433,7 +434,7 @@ struct cardstate {
26381 spinlock_t cmdlock;
26382 unsigned curlen, cmdbytes;
26383
26384- unsigned open_count;
26385+ local_t open_count;
26386 struct tty_struct *tty;
26387 struct tasklet_struct if_wake_tasklet;
26388 unsigned control_state;
26389diff -urNp linux-3.0.4/drivers/isdn/gigaset/interface.c linux-3.0.4/drivers/isdn/gigaset/interface.c
26390--- linux-3.0.4/drivers/isdn/gigaset/interface.c 2011-07-21 22:17:23.000000000 -0400
26391+++ linux-3.0.4/drivers/isdn/gigaset/interface.c 2011-08-23 21:47:55.000000000 -0400
26392@@ -162,9 +162,7 @@ static int if_open(struct tty_struct *tt
26393 }
26394 tty->driver_data = cs;
26395
26396- ++cs->open_count;
26397-
26398- if (cs->open_count == 1) {
26399+ if (local_inc_return(&cs->open_count) == 1) {
26400 spin_lock_irqsave(&cs->lock, flags);
26401 cs->tty = tty;
26402 spin_unlock_irqrestore(&cs->lock, flags);
26403@@ -192,10 +190,10 @@ static void if_close(struct tty_struct *
26404
26405 if (!cs->connected)
26406 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26407- else if (!cs->open_count)
26408+ else if (!local_read(&cs->open_count))
26409 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26410 else {
26411- if (!--cs->open_count) {
26412+ if (!local_dec_return(&cs->open_count)) {
26413 spin_lock_irqsave(&cs->lock, flags);
26414 cs->tty = NULL;
26415 spin_unlock_irqrestore(&cs->lock, flags);
26416@@ -230,7 +228,7 @@ static int if_ioctl(struct tty_struct *t
26417 if (!cs->connected) {
26418 gig_dbg(DEBUG_IF, "not connected");
26419 retval = -ENODEV;
26420- } else if (!cs->open_count)
26421+ } else if (!local_read(&cs->open_count))
26422 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26423 else {
26424 retval = 0;
26425@@ -360,7 +358,7 @@ static int if_write(struct tty_struct *t
26426 retval = -ENODEV;
26427 goto done;
26428 }
26429- if (!cs->open_count) {
26430+ if (!local_read(&cs->open_count)) {
26431 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26432 retval = -ENODEV;
26433 goto done;
26434@@ -413,7 +411,7 @@ static int if_write_room(struct tty_stru
26435 if (!cs->connected) {
26436 gig_dbg(DEBUG_IF, "not connected");
26437 retval = -ENODEV;
26438- } else if (!cs->open_count)
26439+ } else if (!local_read(&cs->open_count))
26440 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26441 else if (cs->mstate != MS_LOCKED) {
26442 dev_warn(cs->dev, "can't write to unlocked device\n");
26443@@ -443,7 +441,7 @@ static int if_chars_in_buffer(struct tty
26444
26445 if (!cs->connected)
26446 gig_dbg(DEBUG_IF, "not connected");
26447- else if (!cs->open_count)
26448+ else if (!local_read(&cs->open_count))
26449 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26450 else if (cs->mstate != MS_LOCKED)
26451 dev_warn(cs->dev, "can't write to unlocked device\n");
26452@@ -471,7 +469,7 @@ static void if_throttle(struct tty_struc
26453
26454 if (!cs->connected)
26455 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26456- else if (!cs->open_count)
26457+ else if (!local_read(&cs->open_count))
26458 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26459 else
26460 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
26461@@ -495,7 +493,7 @@ static void if_unthrottle(struct tty_str
26462
26463 if (!cs->connected)
26464 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26465- else if (!cs->open_count)
26466+ else if (!local_read(&cs->open_count))
26467 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26468 else
26469 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
26470@@ -526,7 +524,7 @@ static void if_set_termios(struct tty_st
26471 goto out;
26472 }
26473
26474- if (!cs->open_count) {
26475+ if (!local_read(&cs->open_count)) {
26476 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26477 goto out;
26478 }
26479diff -urNp linux-3.0.4/drivers/isdn/hardware/avm/b1.c linux-3.0.4/drivers/isdn/hardware/avm/b1.c
26480--- linux-3.0.4/drivers/isdn/hardware/avm/b1.c 2011-07-21 22:17:23.000000000 -0400
26481+++ linux-3.0.4/drivers/isdn/hardware/avm/b1.c 2011-08-23 21:47:55.000000000 -0400
26482@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
26483 }
26484 if (left) {
26485 if (t4file->user) {
26486- if (copy_from_user(buf, dp, left))
26487+ if (left > sizeof buf || copy_from_user(buf, dp, left))
26488 return -EFAULT;
26489 } else {
26490 memcpy(buf, dp, left);
26491@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
26492 }
26493 if (left) {
26494 if (config->user) {
26495- if (copy_from_user(buf, dp, left))
26496+ if (left > sizeof buf || copy_from_user(buf, dp, left))
26497 return -EFAULT;
26498 } else {
26499 memcpy(buf, dp, left);
26500diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c
26501--- linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c 2011-07-21 22:17:23.000000000 -0400
26502+++ linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c 2011-08-23 21:48:14.000000000 -0400
26503@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
26504 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
26505 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
26506
26507+ pax_track_stack();
26508
26509 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
26510 {
26511diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c
26512--- linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c 2011-07-21 22:17:23.000000000 -0400
26513+++ linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c 2011-08-23 21:48:14.000000000 -0400
26514@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
26515 IDI_SYNC_REQ req;
26516 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26517
26518+ pax_track_stack();
26519+
26520 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26521
26522 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26523diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c
26524--- linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c 2011-07-21 22:17:23.000000000 -0400
26525+++ linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c 2011-08-23 21:48:14.000000000 -0400
26526@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
26527 IDI_SYNC_REQ req;
26528 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26529
26530+ pax_track_stack();
26531+
26532 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26533
26534 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26535diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c
26536--- linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c 2011-07-21 22:17:23.000000000 -0400
26537+++ linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c 2011-08-23 21:48:14.000000000 -0400
26538@@ -160,6 +160,8 @@ static int DIVA_INIT_FUNCTION connect_di
26539 IDI_SYNC_REQ req;
26540 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26541
26542+ pax_track_stack();
26543+
26544 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26545
26546 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26547diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h
26548--- linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h 2011-07-21 22:17:23.000000000 -0400
26549+++ linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h 2011-08-23 21:47:55.000000000 -0400
26550@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
26551 } diva_didd_add_adapter_t;
26552 typedef struct _diva_didd_remove_adapter {
26553 IDI_CALL p_request;
26554-} diva_didd_remove_adapter_t;
26555+} __no_const diva_didd_remove_adapter_t;
26556 typedef struct _diva_didd_read_adapter_array {
26557 void * buffer;
26558 dword length;
26559diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c
26560--- linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c 2011-07-21 22:17:23.000000000 -0400
26561+++ linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c 2011-08-23 21:48:14.000000000 -0400
26562@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
26563 IDI_SYNC_REQ req;
26564 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26565
26566+ pax_track_stack();
26567+
26568 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26569
26570 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26571diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/message.c linux-3.0.4/drivers/isdn/hardware/eicon/message.c
26572--- linux-3.0.4/drivers/isdn/hardware/eicon/message.c 2011-07-21 22:17:23.000000000 -0400
26573+++ linux-3.0.4/drivers/isdn/hardware/eicon/message.c 2011-08-23 21:48:14.000000000 -0400
26574@@ -4886,6 +4886,8 @@ static void sig_ind(PLCI *plci)
26575 dword d;
26576 word w;
26577
26578+ pax_track_stack();
26579+
26580 a = plci->adapter;
26581 Id = ((word)plci->Id<<8)|a->Id;
26582 PUT_WORD(&SS_Ind[4],0x0000);
26583@@ -7480,6 +7482,8 @@ static word add_b1(PLCI *plci, API_PARSE
26584 word j, n, w;
26585 dword d;
26586
26587+ pax_track_stack();
26588+
26589
26590 for(i=0;i<8;i++) bp_parms[i].length = 0;
26591 for(i=0;i<2;i++) global_config[i].length = 0;
26592@@ -7954,6 +7958,8 @@ static word add_b23(PLCI *plci, API_PARS
26593 const byte llc3[] = {4,3,2,2,6,6,0};
26594 const byte header[] = {0,2,3,3,0,0,0};
26595
26596+ pax_track_stack();
26597+
26598 for(i=0;i<8;i++) bp_parms[i].length = 0;
26599 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
26600 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
26601@@ -14741,6 +14747,8 @@ static void group_optimization(DIVA_CAPI
26602 word appl_number_group_type[MAX_APPL];
26603 PLCI *auxplci;
26604
26605+ pax_track_stack();
26606+
26607 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
26608
26609 if(!a->group_optimization_enabled)
26610diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c
26611--- linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c 2011-07-21 22:17:23.000000000 -0400
26612+++ linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c 2011-08-23 21:48:14.000000000 -0400
26613@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
26614 IDI_SYNC_REQ req;
26615 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26616
26617+ pax_track_stack();
26618+
26619 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26620
26621 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26622diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h
26623--- linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-07-21 22:17:23.000000000 -0400
26624+++ linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-23 21:47:55.000000000 -0400
26625@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
26626 typedef struct _diva_os_idi_adapter_interface {
26627 diva_init_card_proc_t cleanup_adapter_proc;
26628 diva_cmd_card_proc_t cmd_proc;
26629-} diva_os_idi_adapter_interface_t;
26630+} __no_const diva_os_idi_adapter_interface_t;
26631
26632 typedef struct _diva_os_xdi_adapter {
26633 struct list_head link;
26634diff -urNp linux-3.0.4/drivers/isdn/i4l/isdn_common.c linux-3.0.4/drivers/isdn/i4l/isdn_common.c
26635--- linux-3.0.4/drivers/isdn/i4l/isdn_common.c 2011-07-21 22:17:23.000000000 -0400
26636+++ linux-3.0.4/drivers/isdn/i4l/isdn_common.c 2011-08-23 21:48:14.000000000 -0400
26637@@ -1286,6 +1286,8 @@ isdn_ioctl(struct file *file, uint cmd,
26638 } iocpar;
26639 void __user *argp = (void __user *)arg;
26640
26641+ pax_track_stack();
26642+
26643 #define name iocpar.name
26644 #define bname iocpar.bname
26645 #define iocts iocpar.iocts
26646diff -urNp linux-3.0.4/drivers/isdn/icn/icn.c linux-3.0.4/drivers/isdn/icn/icn.c
26647--- linux-3.0.4/drivers/isdn/icn/icn.c 2011-07-21 22:17:23.000000000 -0400
26648+++ linux-3.0.4/drivers/isdn/icn/icn.c 2011-08-23 21:47:55.000000000 -0400
26649@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len
26650 if (count > len)
26651 count = len;
26652 if (user) {
26653- if (copy_from_user(msg, buf, count))
26654+ if (count > sizeof msg || copy_from_user(msg, buf, count))
26655 return -EFAULT;
26656 } else
26657 memcpy(msg, buf, count);
26658diff -urNp linux-3.0.4/drivers/lguest/core.c linux-3.0.4/drivers/lguest/core.c
26659--- linux-3.0.4/drivers/lguest/core.c 2011-07-21 22:17:23.000000000 -0400
26660+++ linux-3.0.4/drivers/lguest/core.c 2011-08-23 21:47:55.000000000 -0400
26661@@ -92,9 +92,17 @@ static __init int map_switcher(void)
26662 * it's worked so far. The end address needs +1 because __get_vm_area
26663 * allocates an extra guard page, so we need space for that.
26664 */
26665+
26666+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26667+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
26668+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
26669+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
26670+#else
26671 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
26672 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
26673 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
26674+#endif
26675+
26676 if (!switcher_vma) {
26677 err = -ENOMEM;
26678 printk("lguest: could not map switcher pages high\n");
26679@@ -119,7 +127,7 @@ static __init int map_switcher(void)
26680 * Now the Switcher is mapped at the right address, we can't fail!
26681 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
26682 */
26683- memcpy(switcher_vma->addr, start_switcher_text,
26684+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
26685 end_switcher_text - start_switcher_text);
26686
26687 printk(KERN_INFO "lguest: mapped switcher at %p\n",
26688diff -urNp linux-3.0.4/drivers/lguest/x86/core.c linux-3.0.4/drivers/lguest/x86/core.c
26689--- linux-3.0.4/drivers/lguest/x86/core.c 2011-07-21 22:17:23.000000000 -0400
26690+++ linux-3.0.4/drivers/lguest/x86/core.c 2011-08-23 21:47:55.000000000 -0400
26691@@ -59,7 +59,7 @@ static struct {
26692 /* Offset from where switcher.S was compiled to where we've copied it */
26693 static unsigned long switcher_offset(void)
26694 {
26695- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
26696+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
26697 }
26698
26699 /* This cpu's struct lguest_pages. */
26700@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
26701 * These copies are pretty cheap, so we do them unconditionally: */
26702 /* Save the current Host top-level page directory.
26703 */
26704+
26705+#ifdef CONFIG_PAX_PER_CPU_PGD
26706+ pages->state.host_cr3 = read_cr3();
26707+#else
26708 pages->state.host_cr3 = __pa(current->mm->pgd);
26709+#endif
26710+
26711 /*
26712 * Set up the Guest's page tables to see this CPU's pages (and no
26713 * other CPU's pages).
26714@@ -547,7 +553,7 @@ void __init lguest_arch_host_init(void)
26715 * compiled-in switcher code and the high-mapped copy we just made.
26716 */
26717 for (i = 0; i < IDT_ENTRIES; i++)
26718- default_idt_entries[i] += switcher_offset();
26719+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
26720
26721 /*
26722 * Set up the Switcher's per-cpu areas.
26723@@ -630,7 +636,7 @@ void __init lguest_arch_host_init(void)
26724 * it will be undisturbed when we switch. To change %cs and jump we
26725 * need this structure to feed to Intel's "lcall" instruction.
26726 */
26727- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
26728+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
26729 lguest_entry.segment = LGUEST_CS;
26730
26731 /*
26732diff -urNp linux-3.0.4/drivers/lguest/x86/switcher_32.S linux-3.0.4/drivers/lguest/x86/switcher_32.S
26733--- linux-3.0.4/drivers/lguest/x86/switcher_32.S 2011-07-21 22:17:23.000000000 -0400
26734+++ linux-3.0.4/drivers/lguest/x86/switcher_32.S 2011-08-23 21:47:55.000000000 -0400
26735@@ -87,6 +87,7 @@
26736 #include <asm/page.h>
26737 #include <asm/segment.h>
26738 #include <asm/lguest.h>
26739+#include <asm/processor-flags.h>
26740
26741 // We mark the start of the code to copy
26742 // It's placed in .text tho it's never run here
26743@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
26744 // Changes type when we load it: damn Intel!
26745 // For after we switch over our page tables
26746 // That entry will be read-only: we'd crash.
26747+
26748+#ifdef CONFIG_PAX_KERNEXEC
26749+ mov %cr0, %edx
26750+ xor $X86_CR0_WP, %edx
26751+ mov %edx, %cr0
26752+#endif
26753+
26754 movl $(GDT_ENTRY_TSS*8), %edx
26755 ltr %dx
26756
26757@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
26758 // Let's clear it again for our return.
26759 // The GDT descriptor of the Host
26760 // Points to the table after two "size" bytes
26761- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
26762+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
26763 // Clear "used" from type field (byte 5, bit 2)
26764- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
26765+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
26766+
26767+#ifdef CONFIG_PAX_KERNEXEC
26768+ mov %cr0, %eax
26769+ xor $X86_CR0_WP, %eax
26770+ mov %eax, %cr0
26771+#endif
26772
26773 // Once our page table's switched, the Guest is live!
26774 // The Host fades as we run this final step.
26775@@ -295,13 +309,12 @@ deliver_to_host:
26776 // I consulted gcc, and it gave
26777 // These instructions, which I gladly credit:
26778 leal (%edx,%ebx,8), %eax
26779- movzwl (%eax),%edx
26780- movl 4(%eax), %eax
26781- xorw %ax, %ax
26782- orl %eax, %edx
26783+ movl 4(%eax), %edx
26784+ movw (%eax), %dx
26785 // Now the address of the handler's in %edx
26786 // We call it now: its "iret" drops us home.
26787- jmp *%edx
26788+ ljmp $__KERNEL_CS, $1f
26789+1: jmp *%edx
26790
26791 // Every interrupt can come to us here
26792 // But we must truly tell each apart.
26793diff -urNp linux-3.0.4/drivers/md/dm.c linux-3.0.4/drivers/md/dm.c
26794--- linux-3.0.4/drivers/md/dm.c 2011-09-02 18:11:21.000000000 -0400
26795+++ linux-3.0.4/drivers/md/dm.c 2011-08-23 21:47:55.000000000 -0400
26796@@ -164,9 +164,9 @@ struct mapped_device {
26797 /*
26798 * Event handling.
26799 */
26800- atomic_t event_nr;
26801+ atomic_unchecked_t event_nr;
26802 wait_queue_head_t eventq;
26803- atomic_t uevent_seq;
26804+ atomic_unchecked_t uevent_seq;
26805 struct list_head uevent_list;
26806 spinlock_t uevent_lock; /* Protect access to uevent_list */
26807
26808@@ -1842,8 +1842,8 @@ static struct mapped_device *alloc_dev(i
26809 rwlock_init(&md->map_lock);
26810 atomic_set(&md->holders, 1);
26811 atomic_set(&md->open_count, 0);
26812- atomic_set(&md->event_nr, 0);
26813- atomic_set(&md->uevent_seq, 0);
26814+ atomic_set_unchecked(&md->event_nr, 0);
26815+ atomic_set_unchecked(&md->uevent_seq, 0);
26816 INIT_LIST_HEAD(&md->uevent_list);
26817 spin_lock_init(&md->uevent_lock);
26818
26819@@ -1977,7 +1977,7 @@ static void event_callback(void *context
26820
26821 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
26822
26823- atomic_inc(&md->event_nr);
26824+ atomic_inc_unchecked(&md->event_nr);
26825 wake_up(&md->eventq);
26826 }
26827
26828@@ -2553,18 +2553,18 @@ int dm_kobject_uevent(struct mapped_devi
26829
26830 uint32_t dm_next_uevent_seq(struct mapped_device *md)
26831 {
26832- return atomic_add_return(1, &md->uevent_seq);
26833+ return atomic_add_return_unchecked(1, &md->uevent_seq);
26834 }
26835
26836 uint32_t dm_get_event_nr(struct mapped_device *md)
26837 {
26838- return atomic_read(&md->event_nr);
26839+ return atomic_read_unchecked(&md->event_nr);
26840 }
26841
26842 int dm_wait_event(struct mapped_device *md, int event_nr)
26843 {
26844 return wait_event_interruptible(md->eventq,
26845- (event_nr != atomic_read(&md->event_nr)));
26846+ (event_nr != atomic_read_unchecked(&md->event_nr)));
26847 }
26848
26849 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
26850diff -urNp linux-3.0.4/drivers/md/dm-ioctl.c linux-3.0.4/drivers/md/dm-ioctl.c
26851--- linux-3.0.4/drivers/md/dm-ioctl.c 2011-07-21 22:17:23.000000000 -0400
26852+++ linux-3.0.4/drivers/md/dm-ioctl.c 2011-08-23 21:47:55.000000000 -0400
26853@@ -1551,7 +1551,7 @@ static int validate_params(uint cmd, str
26854 cmd == DM_LIST_VERSIONS_CMD)
26855 return 0;
26856
26857- if ((cmd == DM_DEV_CREATE_CMD)) {
26858+ if (cmd == DM_DEV_CREATE_CMD) {
26859 if (!*param->name) {
26860 DMWARN("name not supplied when creating device");
26861 return -EINVAL;
26862diff -urNp linux-3.0.4/drivers/md/dm-raid1.c linux-3.0.4/drivers/md/dm-raid1.c
26863--- linux-3.0.4/drivers/md/dm-raid1.c 2011-07-21 22:17:23.000000000 -0400
26864+++ linux-3.0.4/drivers/md/dm-raid1.c 2011-08-23 21:47:55.000000000 -0400
26865@@ -40,7 +40,7 @@ enum dm_raid1_error {
26866
26867 struct mirror {
26868 struct mirror_set *ms;
26869- atomic_t error_count;
26870+ atomic_unchecked_t error_count;
26871 unsigned long error_type;
26872 struct dm_dev *dev;
26873 sector_t offset;
26874@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(s
26875 struct mirror *m;
26876
26877 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
26878- if (!atomic_read(&m->error_count))
26879+ if (!atomic_read_unchecked(&m->error_count))
26880 return m;
26881
26882 return NULL;
26883@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m
26884 * simple way to tell if a device has encountered
26885 * errors.
26886 */
26887- atomic_inc(&m->error_count);
26888+ atomic_inc_unchecked(&m->error_count);
26889
26890 if (test_and_set_bit(error_type, &m->error_type))
26891 return;
26892@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(stru
26893 struct mirror *m = get_default_mirror(ms);
26894
26895 do {
26896- if (likely(!atomic_read(&m->error_count)))
26897+ if (likely(!atomic_read_unchecked(&m->error_count)))
26898 return m;
26899
26900 if (m-- == ms->mirror)
26901@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
26902 {
26903 struct mirror *default_mirror = get_default_mirror(m->ms);
26904
26905- return !atomic_read(&default_mirror->error_count);
26906+ return !atomic_read_unchecked(&default_mirror->error_count);
26907 }
26908
26909 static int mirror_available(struct mirror_set *ms, struct bio *bio)
26910@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *
26911 */
26912 if (likely(region_in_sync(ms, region, 1)))
26913 m = choose_mirror(ms, bio->bi_sector);
26914- else if (m && atomic_read(&m->error_count))
26915+ else if (m && atomic_read_unchecked(&m->error_count))
26916 m = NULL;
26917
26918 if (likely(m))
26919@@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set
26920 }
26921
26922 ms->mirror[mirror].ms = ms;
26923- atomic_set(&(ms->mirror[mirror].error_count), 0);
26924+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
26925 ms->mirror[mirror].error_type = 0;
26926 ms->mirror[mirror].offset = offset;
26927
26928@@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_targ
26929 */
26930 static char device_status_char(struct mirror *m)
26931 {
26932- if (!atomic_read(&(m->error_count)))
26933+ if (!atomic_read_unchecked(&(m->error_count)))
26934 return 'A';
26935
26936 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
26937diff -urNp linux-3.0.4/drivers/md/dm-stripe.c linux-3.0.4/drivers/md/dm-stripe.c
26938--- linux-3.0.4/drivers/md/dm-stripe.c 2011-07-21 22:17:23.000000000 -0400
26939+++ linux-3.0.4/drivers/md/dm-stripe.c 2011-08-23 21:47:55.000000000 -0400
26940@@ -20,7 +20,7 @@ struct stripe {
26941 struct dm_dev *dev;
26942 sector_t physical_start;
26943
26944- atomic_t error_count;
26945+ atomic_unchecked_t error_count;
26946 };
26947
26948 struct stripe_c {
26949@@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *
26950 kfree(sc);
26951 return r;
26952 }
26953- atomic_set(&(sc->stripe[i].error_count), 0);
26954+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
26955 }
26956
26957 ti->private = sc;
26958@@ -314,7 +314,7 @@ static int stripe_status(struct dm_targe
26959 DMEMIT("%d ", sc->stripes);
26960 for (i = 0; i < sc->stripes; i++) {
26961 DMEMIT("%s ", sc->stripe[i].dev->name);
26962- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
26963+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
26964 'D' : 'A';
26965 }
26966 buffer[i] = '\0';
26967@@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_targe
26968 */
26969 for (i = 0; i < sc->stripes; i++)
26970 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
26971- atomic_inc(&(sc->stripe[i].error_count));
26972- if (atomic_read(&(sc->stripe[i].error_count)) <
26973+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
26974+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
26975 DM_IO_ERROR_THRESHOLD)
26976 schedule_work(&sc->trigger_event);
26977 }
26978diff -urNp linux-3.0.4/drivers/md/dm-table.c linux-3.0.4/drivers/md/dm-table.c
26979--- linux-3.0.4/drivers/md/dm-table.c 2011-07-21 22:17:23.000000000 -0400
26980+++ linux-3.0.4/drivers/md/dm-table.c 2011-08-23 21:47:55.000000000 -0400
26981@@ -390,7 +390,7 @@ static int device_area_is_invalid(struct
26982 if (!dev_size)
26983 return 0;
26984
26985- if ((start >= dev_size) || (start + len > dev_size)) {
26986+ if ((start >= dev_size) || (len > dev_size - start)) {
26987 DMWARN("%s: %s too small for target: "
26988 "start=%llu, len=%llu, dev_size=%llu",
26989 dm_device_name(ti->table->md), bdevname(bdev, b),
26990diff -urNp linux-3.0.4/drivers/md/md.c linux-3.0.4/drivers/md/md.c
26991--- linux-3.0.4/drivers/md/md.c 2011-07-21 22:17:23.000000000 -0400
26992+++ linux-3.0.4/drivers/md/md.c 2011-08-23 21:47:55.000000000 -0400
26993@@ -226,10 +226,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
26994 * start build, activate spare
26995 */
26996 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
26997-static atomic_t md_event_count;
26998+static atomic_unchecked_t md_event_count;
26999 void md_new_event(mddev_t *mddev)
27000 {
27001- atomic_inc(&md_event_count);
27002+ atomic_inc_unchecked(&md_event_count);
27003 wake_up(&md_event_waiters);
27004 }
27005 EXPORT_SYMBOL_GPL(md_new_event);
27006@@ -239,7 +239,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
27007 */
27008 static void md_new_event_inintr(mddev_t *mddev)
27009 {
27010- atomic_inc(&md_event_count);
27011+ atomic_inc_unchecked(&md_event_count);
27012 wake_up(&md_event_waiters);
27013 }
27014
27015@@ -1457,7 +1457,7 @@ static int super_1_load(mdk_rdev_t *rdev
27016
27017 rdev->preferred_minor = 0xffff;
27018 rdev->data_offset = le64_to_cpu(sb->data_offset);
27019- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27020+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27021
27022 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
27023 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
27024@@ -1635,7 +1635,7 @@ static void super_1_sync(mddev_t *mddev,
27025 else
27026 sb->resync_offset = cpu_to_le64(0);
27027
27028- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
27029+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
27030
27031 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
27032 sb->size = cpu_to_le64(mddev->dev_sectors);
27033@@ -2428,7 +2428,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
27034 static ssize_t
27035 errors_show(mdk_rdev_t *rdev, char *page)
27036 {
27037- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
27038+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
27039 }
27040
27041 static ssize_t
27042@@ -2437,7 +2437,7 @@ errors_store(mdk_rdev_t *rdev, const cha
27043 char *e;
27044 unsigned long n = simple_strtoul(buf, &e, 10);
27045 if (*buf && (*e == 0 || *e == '\n')) {
27046- atomic_set(&rdev->corrected_errors, n);
27047+ atomic_set_unchecked(&rdev->corrected_errors, n);
27048 return len;
27049 }
27050 return -EINVAL;
27051@@ -2793,8 +2793,8 @@ void md_rdev_init(mdk_rdev_t *rdev)
27052 rdev->last_read_error.tv_sec = 0;
27053 rdev->last_read_error.tv_nsec = 0;
27054 atomic_set(&rdev->nr_pending, 0);
27055- atomic_set(&rdev->read_errors, 0);
27056- atomic_set(&rdev->corrected_errors, 0);
27057+ atomic_set_unchecked(&rdev->read_errors, 0);
27058+ atomic_set_unchecked(&rdev->corrected_errors, 0);
27059
27060 INIT_LIST_HEAD(&rdev->same_set);
27061 init_waitqueue_head(&rdev->blocked_wait);
27062@@ -6415,7 +6415,7 @@ static int md_seq_show(struct seq_file *
27063
27064 spin_unlock(&pers_lock);
27065 seq_printf(seq, "\n");
27066- mi->event = atomic_read(&md_event_count);
27067+ mi->event = atomic_read_unchecked(&md_event_count);
27068 return 0;
27069 }
27070 if (v == (void*)2) {
27071@@ -6504,7 +6504,7 @@ static int md_seq_show(struct seq_file *
27072 chunk_kb ? "KB" : "B");
27073 if (bitmap->file) {
27074 seq_printf(seq, ", file: ");
27075- seq_path(seq, &bitmap->file->f_path, " \t\n");
27076+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
27077 }
27078
27079 seq_printf(seq, "\n");
27080@@ -6538,7 +6538,7 @@ static int md_seq_open(struct inode *ino
27081 else {
27082 struct seq_file *p = file->private_data;
27083 p->private = mi;
27084- mi->event = atomic_read(&md_event_count);
27085+ mi->event = atomic_read_unchecked(&md_event_count);
27086 }
27087 return error;
27088 }
27089@@ -6554,7 +6554,7 @@ static unsigned int mdstat_poll(struct f
27090 /* always allow read */
27091 mask = POLLIN | POLLRDNORM;
27092
27093- if (mi->event != atomic_read(&md_event_count))
27094+ if (mi->event != atomic_read_unchecked(&md_event_count))
27095 mask |= POLLERR | POLLPRI;
27096 return mask;
27097 }
27098@@ -6598,7 +6598,7 @@ static int is_mddev_idle(mddev_t *mddev,
27099 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
27100 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
27101 (int)part_stat_read(&disk->part0, sectors[1]) -
27102- atomic_read(&disk->sync_io);
27103+ atomic_read_unchecked(&disk->sync_io);
27104 /* sync IO will cause sync_io to increase before the disk_stats
27105 * as sync_io is counted when a request starts, and
27106 * disk_stats is counted when it completes.
27107diff -urNp linux-3.0.4/drivers/md/md.h linux-3.0.4/drivers/md/md.h
27108--- linux-3.0.4/drivers/md/md.h 2011-07-21 22:17:23.000000000 -0400
27109+++ linux-3.0.4/drivers/md/md.h 2011-08-23 21:47:55.000000000 -0400
27110@@ -97,13 +97,13 @@ struct mdk_rdev_s
27111 * only maintained for arrays that
27112 * support hot removal
27113 */
27114- atomic_t read_errors; /* number of consecutive read errors that
27115+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
27116 * we have tried to ignore.
27117 */
27118 struct timespec last_read_error; /* monotonic time since our
27119 * last read error
27120 */
27121- atomic_t corrected_errors; /* number of corrected read errors,
27122+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
27123 * for reporting to userspace and storing
27124 * in superblock.
27125 */
27126@@ -344,7 +344,7 @@ static inline void rdev_dec_pending(mdk_
27127
27128 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
27129 {
27130- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27131+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27132 }
27133
27134 struct mdk_personality
27135diff -urNp linux-3.0.4/drivers/md/raid10.c linux-3.0.4/drivers/md/raid10.c
27136--- linux-3.0.4/drivers/md/raid10.c 2011-07-21 22:17:23.000000000 -0400
27137+++ linux-3.0.4/drivers/md/raid10.c 2011-08-23 21:47:55.000000000 -0400
27138@@ -1186,7 +1186,7 @@ static void end_sync_read(struct bio *bi
27139 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
27140 set_bit(R10BIO_Uptodate, &r10_bio->state);
27141 else {
27142- atomic_add(r10_bio->sectors,
27143+ atomic_add_unchecked(r10_bio->sectors,
27144 &conf->mirrors[d].rdev->corrected_errors);
27145 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
27146 md_error(r10_bio->mddev,
27147@@ -1394,7 +1394,7 @@ static void check_decay_read_errors(mdde
27148 {
27149 struct timespec cur_time_mon;
27150 unsigned long hours_since_last;
27151- unsigned int read_errors = atomic_read(&rdev->read_errors);
27152+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
27153
27154 ktime_get_ts(&cur_time_mon);
27155
27156@@ -1416,9 +1416,9 @@ static void check_decay_read_errors(mdde
27157 * overflowing the shift of read_errors by hours_since_last.
27158 */
27159 if (hours_since_last >= 8 * sizeof(read_errors))
27160- atomic_set(&rdev->read_errors, 0);
27161+ atomic_set_unchecked(&rdev->read_errors, 0);
27162 else
27163- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
27164+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
27165 }
27166
27167 /*
27168@@ -1448,8 +1448,8 @@ static void fix_read_error(conf_t *conf,
27169 return;
27170
27171 check_decay_read_errors(mddev, rdev);
27172- atomic_inc(&rdev->read_errors);
27173- if (atomic_read(&rdev->read_errors) > max_read_errors) {
27174+ atomic_inc_unchecked(&rdev->read_errors);
27175+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
27176 char b[BDEVNAME_SIZE];
27177 bdevname(rdev->bdev, b);
27178
27179@@ -1457,7 +1457,7 @@ static void fix_read_error(conf_t *conf,
27180 "md/raid10:%s: %s: Raid device exceeded "
27181 "read_error threshold [cur %d:max %d]\n",
27182 mdname(mddev), b,
27183- atomic_read(&rdev->read_errors), max_read_errors);
27184+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
27185 printk(KERN_NOTICE
27186 "md/raid10:%s: %s: Failing raid device\n",
27187 mdname(mddev), b);
27188@@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
27189 test_bit(In_sync, &rdev->flags)) {
27190 atomic_inc(&rdev->nr_pending);
27191 rcu_read_unlock();
27192- atomic_add(s, &rdev->corrected_errors);
27193+ atomic_add_unchecked(s, &rdev->corrected_errors);
27194 if (sync_page_io(rdev,
27195 r10_bio->devs[sl].addr +
27196 sect,
27197diff -urNp linux-3.0.4/drivers/md/raid1.c linux-3.0.4/drivers/md/raid1.c
27198--- linux-3.0.4/drivers/md/raid1.c 2011-07-21 22:17:23.000000000 -0400
27199+++ linux-3.0.4/drivers/md/raid1.c 2011-08-23 21:47:55.000000000 -0400
27200@@ -1263,7 +1263,7 @@ static int fix_sync_read_error(r1bio_t *
27201 rdev_dec_pending(rdev, mddev);
27202 md_error(mddev, rdev);
27203 } else
27204- atomic_add(s, &rdev->corrected_errors);
27205+ atomic_add_unchecked(s, &rdev->corrected_errors);
27206 }
27207 d = start;
27208 while (d != r1_bio->read_disk) {
27209@@ -1492,7 +1492,7 @@ static void fix_read_error(conf_t *conf,
27210 /* Well, this device is dead */
27211 md_error(mddev, rdev);
27212 else {
27213- atomic_add(s, &rdev->corrected_errors);
27214+ atomic_add_unchecked(s, &rdev->corrected_errors);
27215 printk(KERN_INFO
27216 "md/raid1:%s: read error corrected "
27217 "(%d sectors at %llu on %s)\n",
27218diff -urNp linux-3.0.4/drivers/md/raid5.c linux-3.0.4/drivers/md/raid5.c
27219--- linux-3.0.4/drivers/md/raid5.c 2011-07-21 22:17:23.000000000 -0400
27220+++ linux-3.0.4/drivers/md/raid5.c 2011-08-23 21:48:14.000000000 -0400
27221@@ -550,7 +550,7 @@ static void ops_run_io(struct stripe_hea
27222 bi->bi_next = NULL;
27223 if ((rw & WRITE) &&
27224 test_bit(R5_ReWrite, &sh->dev[i].flags))
27225- atomic_add(STRIPE_SECTORS,
27226+ atomic_add_unchecked(STRIPE_SECTORS,
27227 &rdev->corrected_errors);
27228 generic_make_request(bi);
27229 } else {
27230@@ -1596,15 +1596,15 @@ static void raid5_end_read_request(struc
27231 clear_bit(R5_ReadError, &sh->dev[i].flags);
27232 clear_bit(R5_ReWrite, &sh->dev[i].flags);
27233 }
27234- if (atomic_read(&conf->disks[i].rdev->read_errors))
27235- atomic_set(&conf->disks[i].rdev->read_errors, 0);
27236+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
27237+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
27238 } else {
27239 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
27240 int retry = 0;
27241 rdev = conf->disks[i].rdev;
27242
27243 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
27244- atomic_inc(&rdev->read_errors);
27245+ atomic_inc_unchecked(&rdev->read_errors);
27246 if (conf->mddev->degraded >= conf->max_degraded)
27247 printk_rl(KERN_WARNING
27248 "md/raid:%s: read error not correctable "
27249@@ -1622,7 +1622,7 @@ static void raid5_end_read_request(struc
27250 (unsigned long long)(sh->sector
27251 + rdev->data_offset),
27252 bdn);
27253- else if (atomic_read(&rdev->read_errors)
27254+ else if (atomic_read_unchecked(&rdev->read_errors)
27255 > conf->max_nr_stripes)
27256 printk(KERN_WARNING
27257 "md/raid:%s: Too many read errors, failing device %s.\n",
27258@@ -1945,6 +1945,7 @@ static sector_t compute_blocknr(struct s
27259 sector_t r_sector;
27260 struct stripe_head sh2;
27261
27262+ pax_track_stack();
27263
27264 chunk_offset = sector_div(new_sector, sectors_per_chunk);
27265 stripe = new_sector;
27266diff -urNp linux-3.0.4/drivers/media/common/saa7146_hlp.c linux-3.0.4/drivers/media/common/saa7146_hlp.c
27267--- linux-3.0.4/drivers/media/common/saa7146_hlp.c 2011-07-21 22:17:23.000000000 -0400
27268+++ linux-3.0.4/drivers/media/common/saa7146_hlp.c 2011-08-23 21:48:14.000000000 -0400
27269@@ -353,6 +353,8 @@ static void calculate_clipping_registers
27270
27271 int x[32], y[32], w[32], h[32];
27272
27273+ pax_track_stack();
27274+
27275 /* clear out memory */
27276 memset(&line_list[0], 0x00, sizeof(u32)*32);
27277 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
27278diff -urNp linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
27279--- linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-07-21 22:17:23.000000000 -0400
27280+++ linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-08-23 21:48:14.000000000 -0400
27281@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
27282 u8 buf[HOST_LINK_BUF_SIZE];
27283 int i;
27284
27285+ pax_track_stack();
27286+
27287 dprintk("%s\n", __func__);
27288
27289 /* check if we have space for a link buf in the rx_buffer */
27290@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
27291 unsigned long timeout;
27292 int written;
27293
27294+ pax_track_stack();
27295+
27296 dprintk("%s\n", __func__);
27297
27298 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
27299diff -urNp linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h
27300--- linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h 2011-07-21 22:17:23.000000000 -0400
27301+++ linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-24 18:24:40.000000000 -0400
27302@@ -68,12 +68,12 @@ struct dvb_demux_feed {
27303 union {
27304 struct dmx_ts_feed ts;
27305 struct dmx_section_feed sec;
27306- } feed;
27307+ } __no_const feed;
27308
27309 union {
27310 dmx_ts_cb ts;
27311 dmx_section_cb sec;
27312- } cb;
27313+ } __no_const cb;
27314
27315 struct dvb_demux *demux;
27316 void *priv;
27317diff -urNp linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c
27318--- linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c 2011-07-21 22:17:23.000000000 -0400
27319+++ linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-24 18:24:19.000000000 -0400
27320@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapt
27321 const struct dvb_device *template, void *priv, int type)
27322 {
27323 struct dvb_device *dvbdev;
27324- struct file_operations *dvbdevfops;
27325+ file_operations_no_const *dvbdevfops;
27326 struct device *clsdev;
27327 int minor;
27328 int id;
27329diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c
27330--- linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c 2011-07-21 22:17:23.000000000 -0400
27331+++ linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-24 18:26:33.000000000 -0400
27332@@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_di
27333 struct dib0700_adapter_state {
27334 int (*set_param_save) (struct dvb_frontend *,
27335 struct dvb_frontend_parameters *);
27336-};
27337+} __no_const;
27338
27339 static int dib7070_set_param_override(struct dvb_frontend *fe,
27340 struct dvb_frontend_parameters *fep)
27341diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c
27342--- linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-07-21 22:17:23.000000000 -0400
27343+++ linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-08-23 21:48:14.000000000 -0400
27344@@ -434,6 +434,8 @@ int dib0700_download_firmware(struct usb
27345 if (!buf)
27346 return -ENOMEM;
27347
27348+ pax_track_stack();
27349+
27350 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
27351 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
27352 hx.addr, hx.len, hx.chk);
27353diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h
27354--- linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h 2011-07-21 22:17:23.000000000 -0400
27355+++ linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h 2011-08-24 18:27:27.000000000 -0400
27356@@ -97,7 +97,7 @@
27357 #define DIBUSB_IOCTL_CMD_DISABLE_STREAM 0x02
27358
27359 struct dibusb_state {
27360- struct dib_fe_xfer_ops ops;
27361+ dib_fe_xfer_ops_no_const ops;
27362 int mt2060_present;
27363 u8 tuner_addr;
27364 };
27365diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c
27366--- linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c 2011-07-21 22:17:23.000000000 -0400
27367+++ linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c 2011-08-24 18:27:45.000000000 -0400
27368@@ -95,7 +95,7 @@ struct su3000_state {
27369
27370 struct s6x0_state {
27371 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
27372-};
27373+} __no_const;
27374
27375 /* debug */
27376 static int dvb_usb_dw2102_debug;
27377diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c
27378--- linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c 2011-07-21 22:17:23.000000000 -0400
27379+++ linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c 2011-08-23 21:48:14.000000000 -0400
27380@@ -742,6 +742,7 @@ static int lme2510_download_firmware(str
27381 usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
27382 0x06, 0x80, 0x0200, 0x00, data, 0x0109, 1000);
27383
27384+ pax_track_stack();
27385
27386 data[0] = 0x8a;
27387 len_in = 1;
27388@@ -764,6 +765,8 @@ static void lme_coldreset(struct usb_dev
27389 int ret = 0, len_in;
27390 u8 data[512] = {0};
27391
27392+ pax_track_stack();
27393+
27394 data[0] = 0x0a;
27395 len_in = 1;
27396 info("FRM Firmware Cold Reset");
27397diff -urNp linux-3.0.4/drivers/media/dvb/frontends/dib3000.h linux-3.0.4/drivers/media/dvb/frontends/dib3000.h
27398--- linux-3.0.4/drivers/media/dvb/frontends/dib3000.h 2011-07-21 22:17:23.000000000 -0400
27399+++ linux-3.0.4/drivers/media/dvb/frontends/dib3000.h 2011-08-24 18:28:18.000000000 -0400
27400@@ -40,10 +40,11 @@ struct dib_fe_xfer_ops
27401 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
27402 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
27403 };
27404+typedef struct dib_fe_xfer_ops __no_const dib_fe_xfer_ops_no_const;
27405
27406 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
27407 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27408- struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops);
27409+ struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops);
27410 #else
27411 static inline struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27412 struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
27413diff -urNp linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c
27414--- linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c 2011-07-21 22:17:23.000000000 -0400
27415+++ linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c 2011-08-24 18:28:42.000000000 -0400
27416@@ -756,7 +756,7 @@ static int dib3000mb_tuner_pass_ctrl(str
27417 static struct dvb_frontend_ops dib3000mb_ops;
27418
27419 struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27420- struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
27421+ struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops)
27422 {
27423 struct dib3000_state* state = NULL;
27424
27425diff -urNp linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c
27426--- linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c 2011-07-21 22:17:23.000000000 -0400
27427+++ linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c 2011-08-23 21:48:14.000000000 -0400
27428@@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16
27429 int ret = -1;
27430 int sync;
27431
27432+ pax_track_stack();
27433+
27434 dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
27435
27436 fcp = 3000;
27437diff -urNp linux-3.0.4/drivers/media/dvb/frontends/or51211.c linux-3.0.4/drivers/media/dvb/frontends/or51211.c
27438--- linux-3.0.4/drivers/media/dvb/frontends/or51211.c 2011-07-21 22:17:23.000000000 -0400
27439+++ linux-3.0.4/drivers/media/dvb/frontends/or51211.c 2011-08-23 21:48:14.000000000 -0400
27440@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
27441 u8 tudata[585];
27442 int i;
27443
27444+ pax_track_stack();
27445+
27446 dprintk("Firmware is %zd bytes\n",fw->size);
27447
27448 /* Get eprom data */
27449diff -urNp linux-3.0.4/drivers/media/video/cx18/cx18-driver.c linux-3.0.4/drivers/media/video/cx18/cx18-driver.c
27450--- linux-3.0.4/drivers/media/video/cx18/cx18-driver.c 2011-07-21 22:17:23.000000000 -0400
27451+++ linux-3.0.4/drivers/media/video/cx18/cx18-driver.c 2011-08-23 21:48:14.000000000 -0400
27452@@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
27453 struct i2c_client c;
27454 u8 eedata[256];
27455
27456+ pax_track_stack();
27457+
27458 memset(&c, 0, sizeof(c));
27459 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
27460 c.adapter = &cx->i2c_adap[0];
27461diff -urNp linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c
27462--- linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c 2011-07-21 22:17:23.000000000 -0400
27463+++ linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c 2011-08-23 21:48:14.000000000 -0400
27464@@ -53,6 +53,8 @@ static void cx23885_input_process_measur
27465 bool handle = false;
27466 struct ir_raw_event ir_core_event[64];
27467
27468+ pax_track_stack();
27469+
27470 do {
27471 num = 0;
27472 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
27473diff -urNp linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
27474--- linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-07-21 22:17:23.000000000 -0400
27475+++ linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-08-23 21:48:14.000000000 -0400
27476@@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
27477 u8 *eeprom;
27478 struct tveeprom tvdata;
27479
27480+ pax_track_stack();
27481+
27482 memset(&tvdata,0,sizeof(tvdata));
27483
27484 eeprom = pvr2_eeprom_fetch(hdw);
27485diff -urNp linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c
27486--- linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c 2011-07-21 22:17:23.000000000 -0400
27487+++ linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c 2011-08-23 21:48:14.000000000 -0400
27488@@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_su
27489 unsigned char localPAT[256];
27490 unsigned char localPMT[256];
27491
27492+ pax_track_stack();
27493+
27494 /* Set video format - must be done first as it resets other settings */
27495 set_reg8(client, 0x41, h->video_format);
27496
27497diff -urNp linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c
27498--- linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c 2011-07-21 22:17:23.000000000 -0400
27499+++ linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c 2011-08-23 21:48:14.000000000 -0400
27500@@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_d
27501 u8 tmp[512];
27502 dprintk(DBGLVL_CMD, "%s()\n", __func__);
27503
27504+ pax_track_stack();
27505+
27506 /* While any outstand message on the bus exists... */
27507 do {
27508
27509@@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
27510 u8 tmp[512];
27511 dprintk(DBGLVL_CMD, "%s()\n", __func__);
27512
27513+ pax_track_stack();
27514+
27515 while (loop) {
27516
27517 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
27518diff -urNp linux-3.0.4/drivers/media/video/timblogiw.c linux-3.0.4/drivers/media/video/timblogiw.c
27519--- linux-3.0.4/drivers/media/video/timblogiw.c 2011-07-21 22:17:23.000000000 -0400
27520+++ linux-3.0.4/drivers/media/video/timblogiw.c 2011-08-24 18:29:20.000000000 -0400
27521@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *f
27522
27523 /* Platform device functions */
27524
27525-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
27526+static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
27527 .vidioc_querycap = timblogiw_querycap,
27528 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
27529 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
27530diff -urNp linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c
27531--- linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c 2011-07-21 22:17:23.000000000 -0400
27532+++ linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c 2011-08-23 21:48:14.000000000 -0400
27533@@ -707,6 +707,8 @@ static enum parse_state usbvision_parse_
27534 unsigned char rv, gv, bv;
27535 static unsigned char *Y, *U, *V;
27536
27537+ pax_track_stack();
27538+
27539 frame = usbvision->cur_frame;
27540 image_size = frame->frmwidth * frame->frmheight;
27541 if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
27542diff -urNp linux-3.0.4/drivers/media/video/videobuf-dma-sg.c linux-3.0.4/drivers/media/video/videobuf-dma-sg.c
27543--- linux-3.0.4/drivers/media/video/videobuf-dma-sg.c 2011-07-21 22:17:23.000000000 -0400
27544+++ linux-3.0.4/drivers/media/video/videobuf-dma-sg.c 2011-08-23 21:48:14.000000000 -0400
27545@@ -606,6 +606,8 @@ void *videobuf_sg_alloc(size_t size)
27546 {
27547 struct videobuf_queue q;
27548
27549+ pax_track_stack();
27550+
27551 /* Required to make generic handler to call __videobuf_alloc */
27552 q.int_ops = &sg_ops;
27553
27554diff -urNp linux-3.0.4/drivers/message/fusion/mptbase.c linux-3.0.4/drivers/message/fusion/mptbase.c
27555--- linux-3.0.4/drivers/message/fusion/mptbase.c 2011-07-21 22:17:23.000000000 -0400
27556+++ linux-3.0.4/drivers/message/fusion/mptbase.c 2011-08-23 21:48:14.000000000 -0400
27557@@ -6681,8 +6681,13 @@ static int mpt_iocinfo_proc_show(struct
27558 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
27559 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
27560
27561+#ifdef CONFIG_GRKERNSEC_HIDESYM
27562+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
27563+#else
27564 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
27565 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
27566+#endif
27567+
27568 /*
27569 * Rounding UP to nearest 4-kB boundary here...
27570 */
27571diff -urNp linux-3.0.4/drivers/message/fusion/mptsas.c linux-3.0.4/drivers/message/fusion/mptsas.c
27572--- linux-3.0.4/drivers/message/fusion/mptsas.c 2011-07-21 22:17:23.000000000 -0400
27573+++ linux-3.0.4/drivers/message/fusion/mptsas.c 2011-08-23 21:47:55.000000000 -0400
27574@@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devin
27575 return 0;
27576 }
27577
27578+static inline void
27579+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
27580+{
27581+ if (phy_info->port_details) {
27582+ phy_info->port_details->rphy = rphy;
27583+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
27584+ ioc->name, rphy));
27585+ }
27586+
27587+ if (rphy) {
27588+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
27589+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
27590+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
27591+ ioc->name, rphy, rphy->dev.release));
27592+ }
27593+}
27594+
27595 /* no mutex */
27596 static void
27597 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
27598@@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
27599 return NULL;
27600 }
27601
27602-static inline void
27603-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
27604-{
27605- if (phy_info->port_details) {
27606- phy_info->port_details->rphy = rphy;
27607- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
27608- ioc->name, rphy));
27609- }
27610-
27611- if (rphy) {
27612- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
27613- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
27614- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
27615- ioc->name, rphy, rphy->dev.release));
27616- }
27617-}
27618-
27619 static inline struct sas_port *
27620 mptsas_get_port(struct mptsas_phyinfo *phy_info)
27621 {
27622diff -urNp linux-3.0.4/drivers/message/fusion/mptscsih.c linux-3.0.4/drivers/message/fusion/mptscsih.c
27623--- linux-3.0.4/drivers/message/fusion/mptscsih.c 2011-07-21 22:17:23.000000000 -0400
27624+++ linux-3.0.4/drivers/message/fusion/mptscsih.c 2011-08-23 21:47:55.000000000 -0400
27625@@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
27626
27627 h = shost_priv(SChost);
27628
27629- if (h) {
27630- if (h->info_kbuf == NULL)
27631- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
27632- return h->info_kbuf;
27633- h->info_kbuf[0] = '\0';
27634+ if (!h)
27635+ return NULL;
27636
27637- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
27638- h->info_kbuf[size-1] = '\0';
27639- }
27640+ if (h->info_kbuf == NULL)
27641+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
27642+ return h->info_kbuf;
27643+ h->info_kbuf[0] = '\0';
27644+
27645+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
27646+ h->info_kbuf[size-1] = '\0';
27647
27648 return h->info_kbuf;
27649 }
27650diff -urNp linux-3.0.4/drivers/message/i2o/i2o_config.c linux-3.0.4/drivers/message/i2o/i2o_config.c
27651--- linux-3.0.4/drivers/message/i2o/i2o_config.c 2011-07-21 22:17:23.000000000 -0400
27652+++ linux-3.0.4/drivers/message/i2o/i2o_config.c 2011-08-23 21:48:14.000000000 -0400
27653@@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned lon
27654 struct i2o_message *msg;
27655 unsigned int iop;
27656
27657+ pax_track_stack();
27658+
27659 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
27660 return -EFAULT;
27661
27662diff -urNp linux-3.0.4/drivers/message/i2o/i2o_proc.c linux-3.0.4/drivers/message/i2o/i2o_proc.c
27663--- linux-3.0.4/drivers/message/i2o/i2o_proc.c 2011-07-21 22:17:23.000000000 -0400
27664+++ linux-3.0.4/drivers/message/i2o/i2o_proc.c 2011-08-23 21:47:55.000000000 -0400
27665@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
27666 "Array Controller Device"
27667 };
27668
27669-static char *chtostr(u8 * chars, int n)
27670-{
27671- char tmp[256];
27672- tmp[0] = 0;
27673- return strncat(tmp, (char *)chars, n);
27674-}
27675-
27676 static int i2o_report_query_status(struct seq_file *seq, int block_status,
27677 char *group)
27678 {
27679@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
27680
27681 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
27682 seq_printf(seq, "%-#8x", ddm_table.module_id);
27683- seq_printf(seq, "%-29s",
27684- chtostr(ddm_table.module_name_version, 28));
27685+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
27686 seq_printf(seq, "%9d ", ddm_table.data_size);
27687 seq_printf(seq, "%8d", ddm_table.code_size);
27688
27689@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
27690
27691 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
27692 seq_printf(seq, "%-#8x", dst->module_id);
27693- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
27694- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
27695+ seq_printf(seq, "%-.28s", dst->module_name_version);
27696+ seq_printf(seq, "%-.8s", dst->date);
27697 seq_printf(seq, "%8d ", dst->module_size);
27698 seq_printf(seq, "%8d ", dst->mpb_size);
27699 seq_printf(seq, "0x%04x", dst->module_flags);
27700@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
27701 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
27702 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
27703 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
27704- seq_printf(seq, "Vendor info : %s\n",
27705- chtostr((u8 *) (work32 + 2), 16));
27706- seq_printf(seq, "Product info : %s\n",
27707- chtostr((u8 *) (work32 + 6), 16));
27708- seq_printf(seq, "Description : %s\n",
27709- chtostr((u8 *) (work32 + 10), 16));
27710- seq_printf(seq, "Product rev. : %s\n",
27711- chtostr((u8 *) (work32 + 14), 8));
27712+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
27713+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
27714+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
27715+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
27716
27717 seq_printf(seq, "Serial number : ");
27718 print_serial_number(seq, (u8 *) (work32 + 16),
27719@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
27720 }
27721
27722 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
27723- seq_printf(seq, "Module name : %s\n",
27724- chtostr(result.module_name, 24));
27725- seq_printf(seq, "Module revision : %s\n",
27726- chtostr(result.module_rev, 8));
27727+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
27728+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
27729
27730 seq_printf(seq, "Serial number : ");
27731 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
27732@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
27733 return 0;
27734 }
27735
27736- seq_printf(seq, "Device name : %s\n",
27737- chtostr(result.device_name, 64));
27738- seq_printf(seq, "Service name : %s\n",
27739- chtostr(result.service_name, 64));
27740- seq_printf(seq, "Physical name : %s\n",
27741- chtostr(result.physical_location, 64));
27742- seq_printf(seq, "Instance number : %s\n",
27743- chtostr(result.instance_number, 4));
27744+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
27745+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
27746+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
27747+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
27748
27749 return 0;
27750 }
27751diff -urNp linux-3.0.4/drivers/message/i2o/iop.c linux-3.0.4/drivers/message/i2o/iop.c
27752--- linux-3.0.4/drivers/message/i2o/iop.c 2011-07-21 22:17:23.000000000 -0400
27753+++ linux-3.0.4/drivers/message/i2o/iop.c 2011-08-23 21:47:55.000000000 -0400
27754@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
27755
27756 spin_lock_irqsave(&c->context_list_lock, flags);
27757
27758- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
27759- atomic_inc(&c->context_list_counter);
27760+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
27761+ atomic_inc_unchecked(&c->context_list_counter);
27762
27763- entry->context = atomic_read(&c->context_list_counter);
27764+ entry->context = atomic_read_unchecked(&c->context_list_counter);
27765
27766 list_add(&entry->list, &c->context_list);
27767
27768@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(voi
27769
27770 #if BITS_PER_LONG == 64
27771 spin_lock_init(&c->context_list_lock);
27772- atomic_set(&c->context_list_counter, 0);
27773+ atomic_set_unchecked(&c->context_list_counter, 0);
27774 INIT_LIST_HEAD(&c->context_list);
27775 #endif
27776
27777diff -urNp linux-3.0.4/drivers/mfd/abx500-core.c linux-3.0.4/drivers/mfd/abx500-core.c
27778--- linux-3.0.4/drivers/mfd/abx500-core.c 2011-07-21 22:17:23.000000000 -0400
27779+++ linux-3.0.4/drivers/mfd/abx500-core.c 2011-08-23 21:47:55.000000000 -0400
27780@@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
27781
27782 struct abx500_device_entry {
27783 struct list_head list;
27784- struct abx500_ops ops;
27785+ abx500_ops_no_const ops;
27786 struct device *dev;
27787 };
27788
27789diff -urNp linux-3.0.4/drivers/mfd/janz-cmodio.c linux-3.0.4/drivers/mfd/janz-cmodio.c
27790--- linux-3.0.4/drivers/mfd/janz-cmodio.c 2011-07-21 22:17:23.000000000 -0400
27791+++ linux-3.0.4/drivers/mfd/janz-cmodio.c 2011-08-23 21:47:55.000000000 -0400
27792@@ -13,6 +13,7 @@
27793
27794 #include <linux/kernel.h>
27795 #include <linux/module.h>
27796+#include <linux/slab.h>
27797 #include <linux/init.h>
27798 #include <linux/pci.h>
27799 #include <linux/interrupt.h>
27800diff -urNp linux-3.0.4/drivers/mfd/wm8350-i2c.c linux-3.0.4/drivers/mfd/wm8350-i2c.c
27801--- linux-3.0.4/drivers/mfd/wm8350-i2c.c 2011-07-21 22:17:23.000000000 -0400
27802+++ linux-3.0.4/drivers/mfd/wm8350-i2c.c 2011-08-23 21:48:14.000000000 -0400
27803@@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struc
27804 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
27805 int ret;
27806
27807+ pax_track_stack();
27808+
27809 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
27810 return -EINVAL;
27811
27812diff -urNp linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c
27813--- linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c 2011-07-21 22:17:23.000000000 -0400
27814+++ linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c 2011-08-23 21:47:55.000000000 -0400
27815@@ -435,7 +435,7 @@ static irqreturn_t lis302dl_interrupt(in
27816 * the lid is closed. This leads to interrupts as soon as a little move
27817 * is done.
27818 */
27819- atomic_inc(&lis3_dev.count);
27820+ atomic_inc_unchecked(&lis3_dev.count);
27821
27822 wake_up_interruptible(&lis3_dev.misc_wait);
27823 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
27824@@ -518,7 +518,7 @@ static int lis3lv02d_misc_open(struct in
27825 if (lis3_dev.pm_dev)
27826 pm_runtime_get_sync(lis3_dev.pm_dev);
27827
27828- atomic_set(&lis3_dev.count, 0);
27829+ atomic_set_unchecked(&lis3_dev.count, 0);
27830 return 0;
27831 }
27832
27833@@ -545,7 +545,7 @@ static ssize_t lis3lv02d_misc_read(struc
27834 add_wait_queue(&lis3_dev.misc_wait, &wait);
27835 while (true) {
27836 set_current_state(TASK_INTERRUPTIBLE);
27837- data = atomic_xchg(&lis3_dev.count, 0);
27838+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
27839 if (data)
27840 break;
27841
27842@@ -583,7 +583,7 @@ out:
27843 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
27844 {
27845 poll_wait(file, &lis3_dev.misc_wait, wait);
27846- if (atomic_read(&lis3_dev.count))
27847+ if (atomic_read_unchecked(&lis3_dev.count))
27848 return POLLIN | POLLRDNORM;
27849 return 0;
27850 }
27851diff -urNp linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h
27852--- linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h 2011-07-21 22:17:23.000000000 -0400
27853+++ linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h 2011-08-23 21:47:55.000000000 -0400
27854@@ -265,7 +265,7 @@ struct lis3lv02d {
27855 struct input_polled_dev *idev; /* input device */
27856 struct platform_device *pdev; /* platform device */
27857 struct regulator_bulk_data regulators[2];
27858- atomic_t count; /* interrupt count after last read */
27859+ atomic_unchecked_t count; /* interrupt count after last read */
27860 union axis_conversion ac; /* hw -> logical axis */
27861 int mapped_btns[3];
27862
27863diff -urNp linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c
27864--- linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c 2011-07-21 22:17:23.000000000 -0400
27865+++ linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c 2011-08-23 21:47:55.000000000 -0400
27866@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
27867 unsigned long nsec;
27868
27869 nsec = CLKS2NSEC(clks);
27870- atomic_long_inc(&mcs_op_statistics[op].count);
27871- atomic_long_add(nsec, &mcs_op_statistics[op].total);
27872+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
27873+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
27874 if (mcs_op_statistics[op].max < nsec)
27875 mcs_op_statistics[op].max = nsec;
27876 }
27877diff -urNp linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c
27878--- linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c 2011-07-21 22:17:23.000000000 -0400
27879+++ linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c 2011-08-23 21:47:55.000000000 -0400
27880@@ -32,9 +32,9 @@
27881
27882 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
27883
27884-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
27885+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
27886 {
27887- unsigned long val = atomic_long_read(v);
27888+ unsigned long val = atomic_long_read_unchecked(v);
27889
27890 seq_printf(s, "%16lu %s\n", val, id);
27891 }
27892@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
27893
27894 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
27895 for (op = 0; op < mcsop_last; op++) {
27896- count = atomic_long_read(&mcs_op_statistics[op].count);
27897- total = atomic_long_read(&mcs_op_statistics[op].total);
27898+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
27899+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
27900 max = mcs_op_statistics[op].max;
27901 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
27902 count ? total / count : 0, max);
27903diff -urNp linux-3.0.4/drivers/misc/sgi-gru/grutables.h linux-3.0.4/drivers/misc/sgi-gru/grutables.h
27904--- linux-3.0.4/drivers/misc/sgi-gru/grutables.h 2011-07-21 22:17:23.000000000 -0400
27905+++ linux-3.0.4/drivers/misc/sgi-gru/grutables.h 2011-08-23 21:47:55.000000000 -0400
27906@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
27907 * GRU statistics.
27908 */
27909 struct gru_stats_s {
27910- atomic_long_t vdata_alloc;
27911- atomic_long_t vdata_free;
27912- atomic_long_t gts_alloc;
27913- atomic_long_t gts_free;
27914- atomic_long_t gms_alloc;
27915- atomic_long_t gms_free;
27916- atomic_long_t gts_double_allocate;
27917- atomic_long_t assign_context;
27918- atomic_long_t assign_context_failed;
27919- atomic_long_t free_context;
27920- atomic_long_t load_user_context;
27921- atomic_long_t load_kernel_context;
27922- atomic_long_t lock_kernel_context;
27923- atomic_long_t unlock_kernel_context;
27924- atomic_long_t steal_user_context;
27925- atomic_long_t steal_kernel_context;
27926- atomic_long_t steal_context_failed;
27927- atomic_long_t nopfn;
27928- atomic_long_t asid_new;
27929- atomic_long_t asid_next;
27930- atomic_long_t asid_wrap;
27931- atomic_long_t asid_reuse;
27932- atomic_long_t intr;
27933- atomic_long_t intr_cbr;
27934- atomic_long_t intr_tfh;
27935- atomic_long_t intr_spurious;
27936- atomic_long_t intr_mm_lock_failed;
27937- atomic_long_t call_os;
27938- atomic_long_t call_os_wait_queue;
27939- atomic_long_t user_flush_tlb;
27940- atomic_long_t user_unload_context;
27941- atomic_long_t user_exception;
27942- atomic_long_t set_context_option;
27943- atomic_long_t check_context_retarget_intr;
27944- atomic_long_t check_context_unload;
27945- atomic_long_t tlb_dropin;
27946- atomic_long_t tlb_preload_page;
27947- atomic_long_t tlb_dropin_fail_no_asid;
27948- atomic_long_t tlb_dropin_fail_upm;
27949- atomic_long_t tlb_dropin_fail_invalid;
27950- atomic_long_t tlb_dropin_fail_range_active;
27951- atomic_long_t tlb_dropin_fail_idle;
27952- atomic_long_t tlb_dropin_fail_fmm;
27953- atomic_long_t tlb_dropin_fail_no_exception;
27954- atomic_long_t tfh_stale_on_fault;
27955- atomic_long_t mmu_invalidate_range;
27956- atomic_long_t mmu_invalidate_page;
27957- atomic_long_t flush_tlb;
27958- atomic_long_t flush_tlb_gru;
27959- atomic_long_t flush_tlb_gru_tgh;
27960- atomic_long_t flush_tlb_gru_zero_asid;
27961-
27962- atomic_long_t copy_gpa;
27963- atomic_long_t read_gpa;
27964-
27965- atomic_long_t mesq_receive;
27966- atomic_long_t mesq_receive_none;
27967- atomic_long_t mesq_send;
27968- atomic_long_t mesq_send_failed;
27969- atomic_long_t mesq_noop;
27970- atomic_long_t mesq_send_unexpected_error;
27971- atomic_long_t mesq_send_lb_overflow;
27972- atomic_long_t mesq_send_qlimit_reached;
27973- atomic_long_t mesq_send_amo_nacked;
27974- atomic_long_t mesq_send_put_nacked;
27975- atomic_long_t mesq_page_overflow;
27976- atomic_long_t mesq_qf_locked;
27977- atomic_long_t mesq_qf_noop_not_full;
27978- atomic_long_t mesq_qf_switch_head_failed;
27979- atomic_long_t mesq_qf_unexpected_error;
27980- atomic_long_t mesq_noop_unexpected_error;
27981- atomic_long_t mesq_noop_lb_overflow;
27982- atomic_long_t mesq_noop_qlimit_reached;
27983- atomic_long_t mesq_noop_amo_nacked;
27984- atomic_long_t mesq_noop_put_nacked;
27985- atomic_long_t mesq_noop_page_overflow;
27986+ atomic_long_unchecked_t vdata_alloc;
27987+ atomic_long_unchecked_t vdata_free;
27988+ atomic_long_unchecked_t gts_alloc;
27989+ atomic_long_unchecked_t gts_free;
27990+ atomic_long_unchecked_t gms_alloc;
27991+ atomic_long_unchecked_t gms_free;
27992+ atomic_long_unchecked_t gts_double_allocate;
27993+ atomic_long_unchecked_t assign_context;
27994+ atomic_long_unchecked_t assign_context_failed;
27995+ atomic_long_unchecked_t free_context;
27996+ atomic_long_unchecked_t load_user_context;
27997+ atomic_long_unchecked_t load_kernel_context;
27998+ atomic_long_unchecked_t lock_kernel_context;
27999+ atomic_long_unchecked_t unlock_kernel_context;
28000+ atomic_long_unchecked_t steal_user_context;
28001+ atomic_long_unchecked_t steal_kernel_context;
28002+ atomic_long_unchecked_t steal_context_failed;
28003+ atomic_long_unchecked_t nopfn;
28004+ atomic_long_unchecked_t asid_new;
28005+ atomic_long_unchecked_t asid_next;
28006+ atomic_long_unchecked_t asid_wrap;
28007+ atomic_long_unchecked_t asid_reuse;
28008+ atomic_long_unchecked_t intr;
28009+ atomic_long_unchecked_t intr_cbr;
28010+ atomic_long_unchecked_t intr_tfh;
28011+ atomic_long_unchecked_t intr_spurious;
28012+ atomic_long_unchecked_t intr_mm_lock_failed;
28013+ atomic_long_unchecked_t call_os;
28014+ atomic_long_unchecked_t call_os_wait_queue;
28015+ atomic_long_unchecked_t user_flush_tlb;
28016+ atomic_long_unchecked_t user_unload_context;
28017+ atomic_long_unchecked_t user_exception;
28018+ atomic_long_unchecked_t set_context_option;
28019+ atomic_long_unchecked_t check_context_retarget_intr;
28020+ atomic_long_unchecked_t check_context_unload;
28021+ atomic_long_unchecked_t tlb_dropin;
28022+ atomic_long_unchecked_t tlb_preload_page;
28023+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
28024+ atomic_long_unchecked_t tlb_dropin_fail_upm;
28025+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
28026+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
28027+ atomic_long_unchecked_t tlb_dropin_fail_idle;
28028+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
28029+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
28030+ atomic_long_unchecked_t tfh_stale_on_fault;
28031+ atomic_long_unchecked_t mmu_invalidate_range;
28032+ atomic_long_unchecked_t mmu_invalidate_page;
28033+ atomic_long_unchecked_t flush_tlb;
28034+ atomic_long_unchecked_t flush_tlb_gru;
28035+ atomic_long_unchecked_t flush_tlb_gru_tgh;
28036+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
28037+
28038+ atomic_long_unchecked_t copy_gpa;
28039+ atomic_long_unchecked_t read_gpa;
28040+
28041+ atomic_long_unchecked_t mesq_receive;
28042+ atomic_long_unchecked_t mesq_receive_none;
28043+ atomic_long_unchecked_t mesq_send;
28044+ atomic_long_unchecked_t mesq_send_failed;
28045+ atomic_long_unchecked_t mesq_noop;
28046+ atomic_long_unchecked_t mesq_send_unexpected_error;
28047+ atomic_long_unchecked_t mesq_send_lb_overflow;
28048+ atomic_long_unchecked_t mesq_send_qlimit_reached;
28049+ atomic_long_unchecked_t mesq_send_amo_nacked;
28050+ atomic_long_unchecked_t mesq_send_put_nacked;
28051+ atomic_long_unchecked_t mesq_page_overflow;
28052+ atomic_long_unchecked_t mesq_qf_locked;
28053+ atomic_long_unchecked_t mesq_qf_noop_not_full;
28054+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
28055+ atomic_long_unchecked_t mesq_qf_unexpected_error;
28056+ atomic_long_unchecked_t mesq_noop_unexpected_error;
28057+ atomic_long_unchecked_t mesq_noop_lb_overflow;
28058+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
28059+ atomic_long_unchecked_t mesq_noop_amo_nacked;
28060+ atomic_long_unchecked_t mesq_noop_put_nacked;
28061+ atomic_long_unchecked_t mesq_noop_page_overflow;
28062
28063 };
28064
28065@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
28066 tghop_invalidate, mcsop_last};
28067
28068 struct mcs_op_statistic {
28069- atomic_long_t count;
28070- atomic_long_t total;
28071+ atomic_long_unchecked_t count;
28072+ atomic_long_unchecked_t total;
28073 unsigned long max;
28074 };
28075
28076@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
28077
28078 #define STAT(id) do { \
28079 if (gru_options & OPT_STATS) \
28080- atomic_long_inc(&gru_stats.id); \
28081+ atomic_long_inc_unchecked(&gru_stats.id); \
28082 } while (0)
28083
28084 #ifdef CONFIG_SGI_GRU_DEBUG
28085diff -urNp linux-3.0.4/drivers/misc/sgi-xp/xp.h linux-3.0.4/drivers/misc/sgi-xp/xp.h
28086--- linux-3.0.4/drivers/misc/sgi-xp/xp.h 2011-07-21 22:17:23.000000000 -0400
28087+++ linux-3.0.4/drivers/misc/sgi-xp/xp.h 2011-08-23 21:47:55.000000000 -0400
28088@@ -289,7 +289,7 @@ struct xpc_interface {
28089 xpc_notify_func, void *);
28090 void (*received) (short, int, void *);
28091 enum xp_retval (*partid_to_nasids) (short, void *);
28092-};
28093+} __no_const;
28094
28095 extern struct xpc_interface xpc_interface;
28096
28097diff -urNp linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c
28098--- linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c 2011-07-21 22:17:23.000000000 -0400
28099+++ linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c 2011-08-23 21:48:14.000000000 -0400
28100@@ -757,6 +757,8 @@ static int chip_ready (struct map_info *
28101 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
28102 unsigned long timeo = jiffies + HZ;
28103
28104+ pax_track_stack();
28105+
28106 /* Prevent setting state FL_SYNCING for chip in suspended state. */
28107 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
28108 goto sleep;
28109@@ -1653,6 +1655,8 @@ static int __xipram do_write_buffer(stru
28110 unsigned long initial_adr;
28111 int initial_len = len;
28112
28113+ pax_track_stack();
28114+
28115 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
28116 adr += chip->start;
28117 initial_adr = adr;
28118@@ -1871,6 +1875,8 @@ static int __xipram do_erase_oneblock(st
28119 int retries = 3;
28120 int ret;
28121
28122+ pax_track_stack();
28123+
28124 adr += chip->start;
28125
28126 retry:
28127diff -urNp linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c
28128--- linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c 2011-07-21 22:17:23.000000000 -0400
28129+++ linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c 2011-08-23 21:48:14.000000000 -0400
28130@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
28131 unsigned long cmd_addr;
28132 struct cfi_private *cfi = map->fldrv_priv;
28133
28134+ pax_track_stack();
28135+
28136 adr += chip->start;
28137
28138 /* Ensure cmd read/writes are aligned. */
28139@@ -429,6 +431,8 @@ static inline int do_write_buffer(struct
28140 DECLARE_WAITQUEUE(wait, current);
28141 int wbufsize, z;
28142
28143+ pax_track_stack();
28144+
28145 /* M58LW064A requires bus alignment for buffer wriets -- saw */
28146 if (adr & (map_bankwidth(map)-1))
28147 return -EINVAL;
28148@@ -743,6 +747,8 @@ static inline int do_erase_oneblock(stru
28149 DECLARE_WAITQUEUE(wait, current);
28150 int ret = 0;
28151
28152+ pax_track_stack();
28153+
28154 adr += chip->start;
28155
28156 /* Let's determine this according to the interleave only once */
28157@@ -1048,6 +1054,8 @@ static inline int do_lock_oneblock(struc
28158 unsigned long timeo = jiffies + HZ;
28159 DECLARE_WAITQUEUE(wait, current);
28160
28161+ pax_track_stack();
28162+
28163 adr += chip->start;
28164
28165 /* Let's determine this according to the interleave only once */
28166@@ -1197,6 +1205,8 @@ static inline int do_unlock_oneblock(str
28167 unsigned long timeo = jiffies + HZ;
28168 DECLARE_WAITQUEUE(wait, current);
28169
28170+ pax_track_stack();
28171+
28172 adr += chip->start;
28173
28174 /* Let's determine this according to the interleave only once */
28175diff -urNp linux-3.0.4/drivers/mtd/devices/doc2000.c linux-3.0.4/drivers/mtd/devices/doc2000.c
28176--- linux-3.0.4/drivers/mtd/devices/doc2000.c 2011-07-21 22:17:23.000000000 -0400
28177+++ linux-3.0.4/drivers/mtd/devices/doc2000.c 2011-08-23 21:47:55.000000000 -0400
28178@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
28179
28180 /* The ECC will not be calculated correctly if less than 512 is written */
28181 /* DBB-
28182- if (len != 0x200 && eccbuf)
28183+ if (len != 0x200)
28184 printk(KERN_WARNING
28185 "ECC needs a full sector write (adr: %lx size %lx)\n",
28186 (long) to, (long) len);
28187diff -urNp linux-3.0.4/drivers/mtd/devices/doc2001.c linux-3.0.4/drivers/mtd/devices/doc2001.c
28188--- linux-3.0.4/drivers/mtd/devices/doc2001.c 2011-07-21 22:17:23.000000000 -0400
28189+++ linux-3.0.4/drivers/mtd/devices/doc2001.c 2011-08-23 21:47:55.000000000 -0400
28190@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
28191 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
28192
28193 /* Don't allow read past end of device */
28194- if (from >= this->totlen)
28195+ if (from >= this->totlen || !len)
28196 return -EINVAL;
28197
28198 /* Don't allow a single read to cross a 512-byte block boundary */
28199diff -urNp linux-3.0.4/drivers/mtd/ftl.c linux-3.0.4/drivers/mtd/ftl.c
28200--- linux-3.0.4/drivers/mtd/ftl.c 2011-07-21 22:17:23.000000000 -0400
28201+++ linux-3.0.4/drivers/mtd/ftl.c 2011-08-23 21:48:14.000000000 -0400
28202@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
28203 loff_t offset;
28204 uint16_t srcunitswap = cpu_to_le16(srcunit);
28205
28206+ pax_track_stack();
28207+
28208 eun = &part->EUNInfo[srcunit];
28209 xfer = &part->XferInfo[xferunit];
28210 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
28211diff -urNp linux-3.0.4/drivers/mtd/inftlcore.c linux-3.0.4/drivers/mtd/inftlcore.c
28212--- linux-3.0.4/drivers/mtd/inftlcore.c 2011-07-21 22:17:23.000000000 -0400
28213+++ linux-3.0.4/drivers/mtd/inftlcore.c 2011-08-23 21:48:14.000000000 -0400
28214@@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLr
28215 struct inftl_oob oob;
28216 size_t retlen;
28217
28218+ pax_track_stack();
28219+
28220 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
28221 "pending=%d)\n", inftl, thisVUC, pendingblock);
28222
28223diff -urNp linux-3.0.4/drivers/mtd/inftlmount.c linux-3.0.4/drivers/mtd/inftlmount.c
28224--- linux-3.0.4/drivers/mtd/inftlmount.c 2011-07-21 22:17:23.000000000 -0400
28225+++ linux-3.0.4/drivers/mtd/inftlmount.c 2011-08-23 21:48:14.000000000 -0400
28226@@ -53,6 +53,8 @@ static int find_boot_record(struct INFTL
28227 struct INFTLPartition *ip;
28228 size_t retlen;
28229
28230+ pax_track_stack();
28231+
28232 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
28233
28234 /*
28235diff -urNp linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c
28236--- linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c 2011-07-21 22:17:23.000000000 -0400
28237+++ linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c 2011-08-23 21:48:14.000000000 -0400
28238@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
28239 {
28240 map_word pfow_val[4];
28241
28242+ pax_track_stack();
28243+
28244 /* Check identification string */
28245 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
28246 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
28247diff -urNp linux-3.0.4/drivers/mtd/mtdchar.c linux-3.0.4/drivers/mtd/mtdchar.c
28248--- linux-3.0.4/drivers/mtd/mtdchar.c 2011-07-21 22:17:23.000000000 -0400
28249+++ linux-3.0.4/drivers/mtd/mtdchar.c 2011-08-23 21:48:14.000000000 -0400
28250@@ -553,6 +553,8 @@ static int mtd_ioctl(struct file *file,
28251 u_long size;
28252 struct mtd_info_user info;
28253
28254+ pax_track_stack();
28255+
28256 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
28257
28258 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
28259diff -urNp linux-3.0.4/drivers/mtd/nand/denali.c linux-3.0.4/drivers/mtd/nand/denali.c
28260--- linux-3.0.4/drivers/mtd/nand/denali.c 2011-07-21 22:17:23.000000000 -0400
28261+++ linux-3.0.4/drivers/mtd/nand/denali.c 2011-08-23 21:47:55.000000000 -0400
28262@@ -26,6 +26,7 @@
28263 #include <linux/pci.h>
28264 #include <linux/mtd/mtd.h>
28265 #include <linux/module.h>
28266+#include <linux/slab.h>
28267
28268 #include "denali.h"
28269
28270diff -urNp linux-3.0.4/drivers/mtd/nftlcore.c linux-3.0.4/drivers/mtd/nftlcore.c
28271--- linux-3.0.4/drivers/mtd/nftlcore.c 2011-07-21 22:17:23.000000000 -0400
28272+++ linux-3.0.4/drivers/mtd/nftlcore.c 2011-08-23 21:48:14.000000000 -0400
28273@@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLre
28274 int inplace = 1;
28275 size_t retlen;
28276
28277+ pax_track_stack();
28278+
28279 memset(BlockMap, 0xff, sizeof(BlockMap));
28280 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
28281
28282diff -urNp linux-3.0.4/drivers/mtd/nftlmount.c linux-3.0.4/drivers/mtd/nftlmount.c
28283--- linux-3.0.4/drivers/mtd/nftlmount.c 2011-07-21 22:17:23.000000000 -0400
28284+++ linux-3.0.4/drivers/mtd/nftlmount.c 2011-08-23 21:48:14.000000000 -0400
28285@@ -24,6 +24,7 @@
28286 #include <asm/errno.h>
28287 #include <linux/delay.h>
28288 #include <linux/slab.h>
28289+#include <linux/sched.h>
28290 #include <linux/mtd/mtd.h>
28291 #include <linux/mtd/nand.h>
28292 #include <linux/mtd/nftl.h>
28293@@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLr
28294 struct mtd_info *mtd = nftl->mbd.mtd;
28295 unsigned int i;
28296
28297+ pax_track_stack();
28298+
28299 /* Assume logical EraseSize == physical erasesize for starting the scan.
28300 We'll sort it out later if we find a MediaHeader which says otherwise */
28301 /* Actually, we won't. The new DiskOnChip driver has already scanned
28302diff -urNp linux-3.0.4/drivers/mtd/ubi/build.c linux-3.0.4/drivers/mtd/ubi/build.c
28303--- linux-3.0.4/drivers/mtd/ubi/build.c 2011-07-21 22:17:23.000000000 -0400
28304+++ linux-3.0.4/drivers/mtd/ubi/build.c 2011-08-23 21:47:55.000000000 -0400
28305@@ -1287,7 +1287,7 @@ module_exit(ubi_exit);
28306 static int __init bytes_str_to_int(const char *str)
28307 {
28308 char *endp;
28309- unsigned long result;
28310+ unsigned long result, scale = 1;
28311
28312 result = simple_strtoul(str, &endp, 0);
28313 if (str == endp || result >= INT_MAX) {
28314@@ -1298,11 +1298,11 @@ static int __init bytes_str_to_int(const
28315
28316 switch (*endp) {
28317 case 'G':
28318- result *= 1024;
28319+ scale *= 1024;
28320 case 'M':
28321- result *= 1024;
28322+ scale *= 1024;
28323 case 'K':
28324- result *= 1024;
28325+ scale *= 1024;
28326 if (endp[1] == 'i' && endp[2] == 'B')
28327 endp += 2;
28328 case '\0':
28329@@ -1313,7 +1313,13 @@ static int __init bytes_str_to_int(const
28330 return -EINVAL;
28331 }
28332
28333- return result;
28334+ if ((intoverflow_t)result*scale >= INT_MAX) {
28335+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
28336+ str);
28337+ return -EINVAL;
28338+ }
28339+
28340+ return result*scale;
28341 }
28342
28343 /**
28344diff -urNp linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c
28345--- linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c 2011-07-21 22:17:23.000000000 -0400
28346+++ linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c 2011-08-23 21:47:55.000000000 -0400
28347@@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct b
28348 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
28349 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
28350
28351-static struct bfa_ioc_hwif nw_hwif_ct;
28352+static struct bfa_ioc_hwif nw_hwif_ct = {
28353+ .ioc_pll_init = bfa_ioc_ct_pll_init,
28354+ .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
28355+ .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
28356+ .ioc_reg_init = bfa_ioc_ct_reg_init,
28357+ .ioc_map_port = bfa_ioc_ct_map_port,
28358+ .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
28359+ .ioc_notify_fail = bfa_ioc_ct_notify_fail,
28360+ .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
28361+ .ioc_sync_start = bfa_ioc_ct_sync_start,
28362+ .ioc_sync_join = bfa_ioc_ct_sync_join,
28363+ .ioc_sync_leave = bfa_ioc_ct_sync_leave,
28364+ .ioc_sync_ack = bfa_ioc_ct_sync_ack,
28365+ .ioc_sync_complete = bfa_ioc_ct_sync_complete
28366+};
28367
28368 /**
28369 * Called from bfa_ioc_attach() to map asic specific calls.
28370@@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
28371 void
28372 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
28373 {
28374- nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
28375- nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
28376- nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
28377- nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
28378- nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
28379- nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
28380- nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
28381- nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
28382- nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
28383- nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
28384- nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
28385- nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
28386- nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
28387-
28388 ioc->ioc_hwif = &nw_hwif_ct;
28389 }
28390
28391diff -urNp linux-3.0.4/drivers/net/bna/bnad.c linux-3.0.4/drivers/net/bna/bnad.c
28392--- linux-3.0.4/drivers/net/bna/bnad.c 2011-07-21 22:17:23.000000000 -0400
28393+++ linux-3.0.4/drivers/net/bna/bnad.c 2011-08-23 21:47:55.000000000 -0400
28394@@ -1681,7 +1681,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28395 struct bna_intr_info *intr_info =
28396 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
28397 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
28398- struct bna_tx_event_cbfn tx_cbfn;
28399+ static struct bna_tx_event_cbfn tx_cbfn = {
28400+ /* Initialize the tx event handlers */
28401+ .tcb_setup_cbfn = bnad_cb_tcb_setup,
28402+ .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
28403+ .tx_stall_cbfn = bnad_cb_tx_stall,
28404+ .tx_resume_cbfn = bnad_cb_tx_resume,
28405+ .tx_cleanup_cbfn = bnad_cb_tx_cleanup
28406+ };
28407 struct bna_tx *tx;
28408 unsigned long flags;
28409
28410@@ -1690,13 +1697,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28411 tx_config->txq_depth = bnad->txq_depth;
28412 tx_config->tx_type = BNA_TX_T_REGULAR;
28413
28414- /* Initialize the tx event handlers */
28415- tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
28416- tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
28417- tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
28418- tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
28419- tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
28420-
28421 /* Get BNA's resource requirement for one tx object */
28422 spin_lock_irqsave(&bnad->bna_lock, flags);
28423 bna_tx_res_req(bnad->num_txq_per_tx,
28424@@ -1827,21 +1827,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx
28425 struct bna_intr_info *intr_info =
28426 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
28427 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
28428- struct bna_rx_event_cbfn rx_cbfn;
28429+ static struct bna_rx_event_cbfn rx_cbfn = {
28430+ /* Initialize the Rx event handlers */
28431+ .rcb_setup_cbfn = bnad_cb_rcb_setup,
28432+ .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
28433+ .ccb_setup_cbfn = bnad_cb_ccb_setup,
28434+ .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
28435+ .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
28436+ .rx_post_cbfn = bnad_cb_rx_post
28437+ };
28438 struct bna_rx *rx;
28439 unsigned long flags;
28440
28441 /* Initialize the Rx object configuration */
28442 bnad_init_rx_config(bnad, rx_config);
28443
28444- /* Initialize the Rx event handlers */
28445- rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
28446- rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
28447- rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
28448- rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
28449- rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
28450- rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
28451-
28452 /* Get BNA's resource requirement for one Rx object */
28453 spin_lock_irqsave(&bnad->bna_lock, flags);
28454 bna_rx_res_req(rx_config, res_info);
28455diff -urNp linux-3.0.4/drivers/net/bnx2.c linux-3.0.4/drivers/net/bnx2.c
28456--- linux-3.0.4/drivers/net/bnx2.c 2011-07-21 22:17:23.000000000 -0400
28457+++ linux-3.0.4/drivers/net/bnx2.c 2011-08-23 21:48:14.000000000 -0400
28458@@ -5828,6 +5828,8 @@ bnx2_test_nvram(struct bnx2 *bp)
28459 int rc = 0;
28460 u32 magic, csum;
28461
28462+ pax_track_stack();
28463+
28464 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
28465 goto test_nvram_done;
28466
28467diff -urNp linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c
28468--- linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c 2011-07-21 22:17:23.000000000 -0400
28469+++ linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c 2011-08-23 21:48:14.000000000 -0400
28470@@ -1705,6 +1705,8 @@ static int bnx2x_test_nvram(struct bnx2x
28471 int i, rc;
28472 u32 magic, crc;
28473
28474+ pax_track_stack();
28475+
28476 if (BP_NOMCP(bp))
28477 return 0;
28478
28479diff -urNp linux-3.0.4/drivers/net/cxgb3/l2t.h linux-3.0.4/drivers/net/cxgb3/l2t.h
28480--- linux-3.0.4/drivers/net/cxgb3/l2t.h 2011-07-21 22:17:23.000000000 -0400
28481+++ linux-3.0.4/drivers/net/cxgb3/l2t.h 2011-08-23 21:47:55.000000000 -0400
28482@@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)
28483 */
28484 struct l2t_skb_cb {
28485 arp_failure_handler_func arp_failure_handler;
28486-};
28487+} __no_const;
28488
28489 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
28490
28491diff -urNp linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c
28492--- linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c 2011-07-21 22:17:23.000000000 -0400
28493+++ linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c 2011-08-23 21:48:14.000000000 -0400
28494@@ -3396,6 +3396,8 @@ static int __devinit enable_msix(struct
28495 unsigned int nchan = adap->params.nports;
28496 struct msix_entry entries[MAX_INGQ + 1];
28497
28498+ pax_track_stack();
28499+
28500 for (i = 0; i < ARRAY_SIZE(entries); ++i)
28501 entries[i].entry = i;
28502
28503diff -urNp linux-3.0.4/drivers/net/cxgb4/t4_hw.c linux-3.0.4/drivers/net/cxgb4/t4_hw.c
28504--- linux-3.0.4/drivers/net/cxgb4/t4_hw.c 2011-07-21 22:17:23.000000000 -0400
28505+++ linux-3.0.4/drivers/net/cxgb4/t4_hw.c 2011-08-23 21:48:14.000000000 -0400
28506@@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter
28507 u8 vpd[VPD_LEN], csum;
28508 unsigned int vpdr_len, kw_offset, id_len;
28509
28510+ pax_track_stack();
28511+
28512 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
28513 if (ret < 0)
28514 return ret;
28515diff -urNp linux-3.0.4/drivers/net/e1000e/82571.c linux-3.0.4/drivers/net/e1000e/82571.c
28516--- linux-3.0.4/drivers/net/e1000e/82571.c 2011-07-21 22:17:23.000000000 -0400
28517+++ linux-3.0.4/drivers/net/e1000e/82571.c 2011-08-23 21:47:55.000000000 -0400
28518@@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(s
28519 {
28520 struct e1000_hw *hw = &adapter->hw;
28521 struct e1000_mac_info *mac = &hw->mac;
28522- struct e1000_mac_operations *func = &mac->ops;
28523+ e1000_mac_operations_no_const *func = &mac->ops;
28524 u32 swsm = 0;
28525 u32 swsm2 = 0;
28526 bool force_clear_smbi = false;
28527diff -urNp linux-3.0.4/drivers/net/e1000e/es2lan.c linux-3.0.4/drivers/net/e1000e/es2lan.c
28528--- linux-3.0.4/drivers/net/e1000e/es2lan.c 2011-07-21 22:17:23.000000000 -0400
28529+++ linux-3.0.4/drivers/net/e1000e/es2lan.c 2011-08-23 21:47:55.000000000 -0400
28530@@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es
28531 {
28532 struct e1000_hw *hw = &adapter->hw;
28533 struct e1000_mac_info *mac = &hw->mac;
28534- struct e1000_mac_operations *func = &mac->ops;
28535+ e1000_mac_operations_no_const *func = &mac->ops;
28536
28537 /* Set media type */
28538 switch (adapter->pdev->device) {
28539diff -urNp linux-3.0.4/drivers/net/e1000e/hw.h linux-3.0.4/drivers/net/e1000e/hw.h
28540--- linux-3.0.4/drivers/net/e1000e/hw.h 2011-07-21 22:17:23.000000000 -0400
28541+++ linux-3.0.4/drivers/net/e1000e/hw.h 2011-08-23 21:47:55.000000000 -0400
28542@@ -776,6 +776,7 @@ struct e1000_mac_operations {
28543 void (*write_vfta)(struct e1000_hw *, u32, u32);
28544 s32 (*read_mac_addr)(struct e1000_hw *);
28545 };
28546+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28547
28548 /* Function pointers for the PHY. */
28549 struct e1000_phy_operations {
28550@@ -799,6 +800,7 @@ struct e1000_phy_operations {
28551 void (*power_up)(struct e1000_hw *);
28552 void (*power_down)(struct e1000_hw *);
28553 };
28554+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
28555
28556 /* Function pointers for the NVM. */
28557 struct e1000_nvm_operations {
28558@@ -810,9 +812,10 @@ struct e1000_nvm_operations {
28559 s32 (*validate)(struct e1000_hw *);
28560 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
28561 };
28562+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
28563
28564 struct e1000_mac_info {
28565- struct e1000_mac_operations ops;
28566+ e1000_mac_operations_no_const ops;
28567 u8 addr[ETH_ALEN];
28568 u8 perm_addr[ETH_ALEN];
28569
28570@@ -853,7 +856,7 @@ struct e1000_mac_info {
28571 };
28572
28573 struct e1000_phy_info {
28574- struct e1000_phy_operations ops;
28575+ e1000_phy_operations_no_const ops;
28576
28577 enum e1000_phy_type type;
28578
28579@@ -887,7 +890,7 @@ struct e1000_phy_info {
28580 };
28581
28582 struct e1000_nvm_info {
28583- struct e1000_nvm_operations ops;
28584+ e1000_nvm_operations_no_const ops;
28585
28586 enum e1000_nvm_type type;
28587 enum e1000_nvm_override override;
28588diff -urNp linux-3.0.4/drivers/net/hamradio/6pack.c linux-3.0.4/drivers/net/hamradio/6pack.c
28589--- linux-3.0.4/drivers/net/hamradio/6pack.c 2011-07-21 22:17:23.000000000 -0400
28590+++ linux-3.0.4/drivers/net/hamradio/6pack.c 2011-08-23 21:48:14.000000000 -0400
28591@@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct t
28592 unsigned char buf[512];
28593 int count1;
28594
28595+ pax_track_stack();
28596+
28597 if (!count)
28598 return;
28599
28600diff -urNp linux-3.0.4/drivers/net/igb/e1000_hw.h linux-3.0.4/drivers/net/igb/e1000_hw.h
28601--- linux-3.0.4/drivers/net/igb/e1000_hw.h 2011-07-21 22:17:23.000000000 -0400
28602+++ linux-3.0.4/drivers/net/igb/e1000_hw.h 2011-08-23 21:47:55.000000000 -0400
28603@@ -314,6 +314,7 @@ struct e1000_mac_operations {
28604 s32 (*read_mac_addr)(struct e1000_hw *);
28605 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
28606 };
28607+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28608
28609 struct e1000_phy_operations {
28610 s32 (*acquire)(struct e1000_hw *);
28611@@ -330,6 +331,7 @@ struct e1000_phy_operations {
28612 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
28613 s32 (*write_reg)(struct e1000_hw *, u32, u16);
28614 };
28615+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
28616
28617 struct e1000_nvm_operations {
28618 s32 (*acquire)(struct e1000_hw *);
28619@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
28620 s32 (*update)(struct e1000_hw *);
28621 s32 (*validate)(struct e1000_hw *);
28622 };
28623+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
28624
28625 struct e1000_info {
28626 s32 (*get_invariants)(struct e1000_hw *);
28627@@ -350,7 +353,7 @@ struct e1000_info {
28628 extern const struct e1000_info e1000_82575_info;
28629
28630 struct e1000_mac_info {
28631- struct e1000_mac_operations ops;
28632+ e1000_mac_operations_no_const ops;
28633
28634 u8 addr[6];
28635 u8 perm_addr[6];
28636@@ -388,7 +391,7 @@ struct e1000_mac_info {
28637 };
28638
28639 struct e1000_phy_info {
28640- struct e1000_phy_operations ops;
28641+ e1000_phy_operations_no_const ops;
28642
28643 enum e1000_phy_type type;
28644
28645@@ -423,7 +426,7 @@ struct e1000_phy_info {
28646 };
28647
28648 struct e1000_nvm_info {
28649- struct e1000_nvm_operations ops;
28650+ e1000_nvm_operations_no_const ops;
28651 enum e1000_nvm_type type;
28652 enum e1000_nvm_override override;
28653
28654@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
28655 s32 (*check_for_ack)(struct e1000_hw *, u16);
28656 s32 (*check_for_rst)(struct e1000_hw *, u16);
28657 };
28658+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
28659
28660 struct e1000_mbx_stats {
28661 u32 msgs_tx;
28662@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
28663 };
28664
28665 struct e1000_mbx_info {
28666- struct e1000_mbx_operations ops;
28667+ e1000_mbx_operations_no_const ops;
28668 struct e1000_mbx_stats stats;
28669 u32 timeout;
28670 u32 usec_delay;
28671diff -urNp linux-3.0.4/drivers/net/igbvf/vf.h linux-3.0.4/drivers/net/igbvf/vf.h
28672--- linux-3.0.4/drivers/net/igbvf/vf.h 2011-07-21 22:17:23.000000000 -0400
28673+++ linux-3.0.4/drivers/net/igbvf/vf.h 2011-08-23 21:47:55.000000000 -0400
28674@@ -189,9 +189,10 @@ struct e1000_mac_operations {
28675 s32 (*read_mac_addr)(struct e1000_hw *);
28676 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
28677 };
28678+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28679
28680 struct e1000_mac_info {
28681- struct e1000_mac_operations ops;
28682+ e1000_mac_operations_no_const ops;
28683 u8 addr[6];
28684 u8 perm_addr[6];
28685
28686@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
28687 s32 (*check_for_ack)(struct e1000_hw *);
28688 s32 (*check_for_rst)(struct e1000_hw *);
28689 };
28690+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
28691
28692 struct e1000_mbx_stats {
28693 u32 msgs_tx;
28694@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
28695 };
28696
28697 struct e1000_mbx_info {
28698- struct e1000_mbx_operations ops;
28699+ e1000_mbx_operations_no_const ops;
28700 struct e1000_mbx_stats stats;
28701 u32 timeout;
28702 u32 usec_delay;
28703diff -urNp linux-3.0.4/drivers/net/ixgb/ixgb_main.c linux-3.0.4/drivers/net/ixgb/ixgb_main.c
28704--- linux-3.0.4/drivers/net/ixgb/ixgb_main.c 2011-07-21 22:17:23.000000000 -0400
28705+++ linux-3.0.4/drivers/net/ixgb/ixgb_main.c 2011-08-23 21:48:14.000000000 -0400
28706@@ -1070,6 +1070,8 @@ ixgb_set_multi(struct net_device *netdev
28707 u32 rctl;
28708 int i;
28709
28710+ pax_track_stack();
28711+
28712 /* Check for Promiscuous and All Multicast modes */
28713
28714 rctl = IXGB_READ_REG(hw, RCTL);
28715diff -urNp linux-3.0.4/drivers/net/ixgb/ixgb_param.c linux-3.0.4/drivers/net/ixgb/ixgb_param.c
28716--- linux-3.0.4/drivers/net/ixgb/ixgb_param.c 2011-07-21 22:17:23.000000000 -0400
28717+++ linux-3.0.4/drivers/net/ixgb/ixgb_param.c 2011-08-23 21:48:14.000000000 -0400
28718@@ -261,6 +261,9 @@ void __devinit
28719 ixgb_check_options(struct ixgb_adapter *adapter)
28720 {
28721 int bd = adapter->bd_number;
28722+
28723+ pax_track_stack();
28724+
28725 if (bd >= IXGB_MAX_NIC) {
28726 pr_notice("Warning: no configuration for board #%i\n", bd);
28727 pr_notice("Using defaults for all values\n");
28728diff -urNp linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h
28729--- linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h 2011-07-21 22:17:23.000000000 -0400
28730+++ linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h 2011-08-23 21:47:55.000000000 -0400
28731@@ -2584,6 +2584,7 @@ struct ixgbe_eeprom_operations {
28732 s32 (*update_checksum)(struct ixgbe_hw *);
28733 u16 (*calc_checksum)(struct ixgbe_hw *);
28734 };
28735+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
28736
28737 struct ixgbe_mac_operations {
28738 s32 (*init_hw)(struct ixgbe_hw *);
28739@@ -2639,6 +2640,7 @@ struct ixgbe_mac_operations {
28740 /* Flow Control */
28741 s32 (*fc_enable)(struct ixgbe_hw *, s32);
28742 };
28743+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
28744
28745 struct ixgbe_phy_operations {
28746 s32 (*identify)(struct ixgbe_hw *);
28747@@ -2658,9 +2660,10 @@ struct ixgbe_phy_operations {
28748 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
28749 s32 (*check_overtemp)(struct ixgbe_hw *);
28750 };
28751+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
28752
28753 struct ixgbe_eeprom_info {
28754- struct ixgbe_eeprom_operations ops;
28755+ ixgbe_eeprom_operations_no_const ops;
28756 enum ixgbe_eeprom_type type;
28757 u32 semaphore_delay;
28758 u16 word_size;
28759@@ -2670,7 +2673,7 @@ struct ixgbe_eeprom_info {
28760
28761 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
28762 struct ixgbe_mac_info {
28763- struct ixgbe_mac_operations ops;
28764+ ixgbe_mac_operations_no_const ops;
28765 enum ixgbe_mac_type type;
28766 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
28767 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
28768@@ -2698,7 +2701,7 @@ struct ixgbe_mac_info {
28769 };
28770
28771 struct ixgbe_phy_info {
28772- struct ixgbe_phy_operations ops;
28773+ ixgbe_phy_operations_no_const ops;
28774 struct mdio_if_info mdio;
28775 enum ixgbe_phy_type type;
28776 u32 id;
28777@@ -2726,6 +2729,7 @@ struct ixgbe_mbx_operations {
28778 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
28779 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
28780 };
28781+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
28782
28783 struct ixgbe_mbx_stats {
28784 u32 msgs_tx;
28785@@ -2737,7 +2741,7 @@ struct ixgbe_mbx_stats {
28786 };
28787
28788 struct ixgbe_mbx_info {
28789- struct ixgbe_mbx_operations ops;
28790+ ixgbe_mbx_operations_no_const ops;
28791 struct ixgbe_mbx_stats stats;
28792 u32 timeout;
28793 u32 usec_delay;
28794diff -urNp linux-3.0.4/drivers/net/ixgbevf/vf.h linux-3.0.4/drivers/net/ixgbevf/vf.h
28795--- linux-3.0.4/drivers/net/ixgbevf/vf.h 2011-07-21 22:17:23.000000000 -0400
28796+++ linux-3.0.4/drivers/net/ixgbevf/vf.h 2011-08-23 21:47:55.000000000 -0400
28797@@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
28798 s32 (*clear_vfta)(struct ixgbe_hw *);
28799 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
28800 };
28801+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
28802
28803 enum ixgbe_mac_type {
28804 ixgbe_mac_unknown = 0,
28805@@ -79,7 +80,7 @@ enum ixgbe_mac_type {
28806 };
28807
28808 struct ixgbe_mac_info {
28809- struct ixgbe_mac_operations ops;
28810+ ixgbe_mac_operations_no_const ops;
28811 u8 addr[6];
28812 u8 perm_addr[6];
28813
28814@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
28815 s32 (*check_for_ack)(struct ixgbe_hw *);
28816 s32 (*check_for_rst)(struct ixgbe_hw *);
28817 };
28818+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
28819
28820 struct ixgbe_mbx_stats {
28821 u32 msgs_tx;
28822@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
28823 };
28824
28825 struct ixgbe_mbx_info {
28826- struct ixgbe_mbx_operations ops;
28827+ ixgbe_mbx_operations_no_const ops;
28828 struct ixgbe_mbx_stats stats;
28829 u32 timeout;
28830 u32 udelay;
28831diff -urNp linux-3.0.4/drivers/net/ksz884x.c linux-3.0.4/drivers/net/ksz884x.c
28832--- linux-3.0.4/drivers/net/ksz884x.c 2011-07-21 22:17:23.000000000 -0400
28833+++ linux-3.0.4/drivers/net/ksz884x.c 2011-08-23 21:48:14.000000000 -0400
28834@@ -6534,6 +6534,8 @@ static void netdev_get_ethtool_stats(str
28835 int rc;
28836 u64 counter[TOTAL_PORT_COUNTER_NUM];
28837
28838+ pax_track_stack();
28839+
28840 mutex_lock(&hw_priv->lock);
28841 n = SWITCH_PORT_NUM;
28842 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
28843diff -urNp linux-3.0.4/drivers/net/mlx4/main.c linux-3.0.4/drivers/net/mlx4/main.c
28844--- linux-3.0.4/drivers/net/mlx4/main.c 2011-07-21 22:17:23.000000000 -0400
28845+++ linux-3.0.4/drivers/net/mlx4/main.c 2011-08-23 21:48:14.000000000 -0400
28846@@ -40,6 +40,7 @@
28847 #include <linux/dma-mapping.h>
28848 #include <linux/slab.h>
28849 #include <linux/io-mapping.h>
28850+#include <linux/sched.h>
28851
28852 #include <linux/mlx4/device.h>
28853 #include <linux/mlx4/doorbell.h>
28854@@ -764,6 +765,8 @@ static int mlx4_init_hca(struct mlx4_dev
28855 u64 icm_size;
28856 int err;
28857
28858+ pax_track_stack();
28859+
28860 err = mlx4_QUERY_FW(dev);
28861 if (err) {
28862 if (err == -EACCES)
28863diff -urNp linux-3.0.4/drivers/net/niu.c linux-3.0.4/drivers/net/niu.c
28864--- linux-3.0.4/drivers/net/niu.c 2011-09-02 18:11:21.000000000 -0400
28865+++ linux-3.0.4/drivers/net/niu.c 2011-08-23 21:48:14.000000000 -0400
28866@@ -9056,6 +9056,8 @@ static void __devinit niu_try_msix(struc
28867 int i, num_irqs, err;
28868 u8 first_ldg;
28869
28870+ pax_track_stack();
28871+
28872 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
28873 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
28874 ldg_num_map[i] = first_ldg + i;
28875diff -urNp linux-3.0.4/drivers/net/pcnet32.c linux-3.0.4/drivers/net/pcnet32.c
28876--- linux-3.0.4/drivers/net/pcnet32.c 2011-07-21 22:17:23.000000000 -0400
28877+++ linux-3.0.4/drivers/net/pcnet32.c 2011-08-23 21:47:55.000000000 -0400
28878@@ -82,7 +82,7 @@ static int cards_found;
28879 /*
28880 * VLB I/O addresses
28881 */
28882-static unsigned int pcnet32_portlist[] __initdata =
28883+static unsigned int pcnet32_portlist[] __devinitdata =
28884 { 0x300, 0x320, 0x340, 0x360, 0 };
28885
28886 static int pcnet32_debug;
28887@@ -270,7 +270,7 @@ struct pcnet32_private {
28888 struct sk_buff **rx_skbuff;
28889 dma_addr_t *tx_dma_addr;
28890 dma_addr_t *rx_dma_addr;
28891- struct pcnet32_access a;
28892+ struct pcnet32_access *a;
28893 spinlock_t lock; /* Guard lock */
28894 unsigned int cur_rx, cur_tx; /* The next free ring entry */
28895 unsigned int rx_ring_size; /* current rx ring size */
28896@@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct n
28897 u16 val;
28898
28899 netif_wake_queue(dev);
28900- val = lp->a.read_csr(ioaddr, CSR3);
28901+ val = lp->a->read_csr(ioaddr, CSR3);
28902 val &= 0x00ff;
28903- lp->a.write_csr(ioaddr, CSR3, val);
28904+ lp->a->write_csr(ioaddr, CSR3, val);
28905 napi_enable(&lp->napi);
28906 }
28907
28908@@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_d
28909 r = mii_link_ok(&lp->mii_if);
28910 } else if (lp->chip_version >= PCNET32_79C970A) {
28911 ulong ioaddr = dev->base_addr; /* card base I/O address */
28912- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
28913+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
28914 } else { /* can not detect link on really old chips */
28915 r = 1;
28916 }
28917@@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct
28918 pcnet32_netif_stop(dev);
28919
28920 spin_lock_irqsave(&lp->lock, flags);
28921- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28922+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28923
28924 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
28925
28926@@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct
28927 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
28928 {
28929 struct pcnet32_private *lp = netdev_priv(dev);
28930- struct pcnet32_access *a = &lp->a; /* access to registers */
28931+ struct pcnet32_access *a = lp->a; /* access to registers */
28932 ulong ioaddr = dev->base_addr; /* card base I/O address */
28933 struct sk_buff *skb; /* sk buff */
28934 int x, i; /* counters */
28935@@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct
28936 pcnet32_netif_stop(dev);
28937
28938 spin_lock_irqsave(&lp->lock, flags);
28939- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28940+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28941
28942 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
28943
28944 /* Reset the PCNET32 */
28945- lp->a.reset(ioaddr);
28946- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
28947+ lp->a->reset(ioaddr);
28948+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
28949
28950 /* switch pcnet32 to 32bit mode */
28951- lp->a.write_bcr(ioaddr, 20, 2);
28952+ lp->a->write_bcr(ioaddr, 20, 2);
28953
28954 /* purge & init rings but don't actually restart */
28955 pcnet32_restart(dev, 0x0000);
28956
28957- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28958+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28959
28960 /* Initialize Transmit buffers. */
28961 size = data_len + 15;
28962@@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct
28963
28964 /* set int loopback in CSR15 */
28965 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
28966- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
28967+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
28968
28969 teststatus = cpu_to_le16(0x8000);
28970- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
28971+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
28972
28973 /* Check status of descriptors */
28974 for (x = 0; x < numbuffs; x++) {
28975@@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct
28976 }
28977 }
28978
28979- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28980+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28981 wmb();
28982 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
28983 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
28984@@ -1015,7 +1015,7 @@ clean_up:
28985 pcnet32_restart(dev, CSR0_NORMAL);
28986 } else {
28987 pcnet32_purge_rx_ring(dev);
28988- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
28989+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
28990 }
28991 spin_unlock_irqrestore(&lp->lock, flags);
28992
28993@@ -1026,7 +1026,7 @@ static int pcnet32_set_phys_id(struct ne
28994 enum ethtool_phys_id_state state)
28995 {
28996 struct pcnet32_private *lp = netdev_priv(dev);
28997- struct pcnet32_access *a = &lp->a;
28998+ struct pcnet32_access *a = lp->a;
28999 ulong ioaddr = dev->base_addr;
29000 unsigned long flags;
29001 int i;
29002@@ -1067,7 +1067,7 @@ static int pcnet32_suspend(struct net_de
29003 {
29004 int csr5;
29005 struct pcnet32_private *lp = netdev_priv(dev);
29006- struct pcnet32_access *a = &lp->a;
29007+ struct pcnet32_access *a = lp->a;
29008 ulong ioaddr = dev->base_addr;
29009 int ticks;
29010
29011@@ -1324,8 +1324,8 @@ static int pcnet32_poll(struct napi_stru
29012 spin_lock_irqsave(&lp->lock, flags);
29013 if (pcnet32_tx(dev)) {
29014 /* reset the chip to clear the error condition, then restart */
29015- lp->a.reset(ioaddr);
29016- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29017+ lp->a->reset(ioaddr);
29018+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29019 pcnet32_restart(dev, CSR0_START);
29020 netif_wake_queue(dev);
29021 }
29022@@ -1337,12 +1337,12 @@ static int pcnet32_poll(struct napi_stru
29023 __napi_complete(napi);
29024
29025 /* clear interrupt masks */
29026- val = lp->a.read_csr(ioaddr, CSR3);
29027+ val = lp->a->read_csr(ioaddr, CSR3);
29028 val &= 0x00ff;
29029- lp->a.write_csr(ioaddr, CSR3, val);
29030+ lp->a->write_csr(ioaddr, CSR3, val);
29031
29032 /* Set interrupt enable. */
29033- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
29034+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
29035
29036 spin_unlock_irqrestore(&lp->lock, flags);
29037 }
29038@@ -1365,7 +1365,7 @@ static void pcnet32_get_regs(struct net_
29039 int i, csr0;
29040 u16 *buff = ptr;
29041 struct pcnet32_private *lp = netdev_priv(dev);
29042- struct pcnet32_access *a = &lp->a;
29043+ struct pcnet32_access *a = lp->a;
29044 ulong ioaddr = dev->base_addr;
29045 unsigned long flags;
29046
29047@@ -1401,9 +1401,9 @@ static void pcnet32_get_regs(struct net_
29048 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
29049 if (lp->phymask & (1 << j)) {
29050 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
29051- lp->a.write_bcr(ioaddr, 33,
29052+ lp->a->write_bcr(ioaddr, 33,
29053 (j << 5) | i);
29054- *buff++ = lp->a.read_bcr(ioaddr, 34);
29055+ *buff++ = lp->a->read_bcr(ioaddr, 34);
29056 }
29057 }
29058 }
29059@@ -1785,7 +1785,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29060 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
29061 lp->options |= PCNET32_PORT_FD;
29062
29063- lp->a = *a;
29064+ lp->a = a;
29065
29066 /* prior to register_netdev, dev->name is not yet correct */
29067 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
29068@@ -1844,7 +1844,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29069 if (lp->mii) {
29070 /* lp->phycount and lp->phymask are set to 0 by memset above */
29071
29072- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29073+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29074 /* scan for PHYs */
29075 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29076 unsigned short id1, id2;
29077@@ -1864,7 +1864,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29078 pr_info("Found PHY %04x:%04x at address %d\n",
29079 id1, id2, i);
29080 }
29081- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29082+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29083 if (lp->phycount > 1)
29084 lp->options |= PCNET32_PORT_MII;
29085 }
29086@@ -2020,10 +2020,10 @@ static int pcnet32_open(struct net_devic
29087 }
29088
29089 /* Reset the PCNET32 */
29090- lp->a.reset(ioaddr);
29091+ lp->a->reset(ioaddr);
29092
29093 /* switch pcnet32 to 32bit mode */
29094- lp->a.write_bcr(ioaddr, 20, 2);
29095+ lp->a->write_bcr(ioaddr, 20, 2);
29096
29097 netif_printk(lp, ifup, KERN_DEBUG, dev,
29098 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
29099@@ -2032,14 +2032,14 @@ static int pcnet32_open(struct net_devic
29100 (u32) (lp->init_dma_addr));
29101
29102 /* set/reset autoselect bit */
29103- val = lp->a.read_bcr(ioaddr, 2) & ~2;
29104+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
29105 if (lp->options & PCNET32_PORT_ASEL)
29106 val |= 2;
29107- lp->a.write_bcr(ioaddr, 2, val);
29108+ lp->a->write_bcr(ioaddr, 2, val);
29109
29110 /* handle full duplex setting */
29111 if (lp->mii_if.full_duplex) {
29112- val = lp->a.read_bcr(ioaddr, 9) & ~3;
29113+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
29114 if (lp->options & PCNET32_PORT_FD) {
29115 val |= 1;
29116 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
29117@@ -2049,14 +2049,14 @@ static int pcnet32_open(struct net_devic
29118 if (lp->chip_version == 0x2627)
29119 val |= 3;
29120 }
29121- lp->a.write_bcr(ioaddr, 9, val);
29122+ lp->a->write_bcr(ioaddr, 9, val);
29123 }
29124
29125 /* set/reset GPSI bit in test register */
29126- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
29127+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
29128 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
29129 val |= 0x10;
29130- lp->a.write_csr(ioaddr, 124, val);
29131+ lp->a->write_csr(ioaddr, 124, val);
29132
29133 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
29134 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
29135@@ -2075,24 +2075,24 @@ static int pcnet32_open(struct net_devic
29136 * duplex, and/or enable auto negotiation, and clear DANAS
29137 */
29138 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
29139- lp->a.write_bcr(ioaddr, 32,
29140- lp->a.read_bcr(ioaddr, 32) | 0x0080);
29141+ lp->a->write_bcr(ioaddr, 32,
29142+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
29143 /* disable Auto Negotiation, set 10Mpbs, HD */
29144- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
29145+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
29146 if (lp->options & PCNET32_PORT_FD)
29147 val |= 0x10;
29148 if (lp->options & PCNET32_PORT_100)
29149 val |= 0x08;
29150- lp->a.write_bcr(ioaddr, 32, val);
29151+ lp->a->write_bcr(ioaddr, 32, val);
29152 } else {
29153 if (lp->options & PCNET32_PORT_ASEL) {
29154- lp->a.write_bcr(ioaddr, 32,
29155- lp->a.read_bcr(ioaddr,
29156+ lp->a->write_bcr(ioaddr, 32,
29157+ lp->a->read_bcr(ioaddr,
29158 32) | 0x0080);
29159 /* enable auto negotiate, setup, disable fd */
29160- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
29161+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
29162 val |= 0x20;
29163- lp->a.write_bcr(ioaddr, 32, val);
29164+ lp->a->write_bcr(ioaddr, 32, val);
29165 }
29166 }
29167 } else {
29168@@ -2105,10 +2105,10 @@ static int pcnet32_open(struct net_devic
29169 * There is really no good other way to handle multiple PHYs
29170 * other than turning off all automatics
29171 */
29172- val = lp->a.read_bcr(ioaddr, 2);
29173- lp->a.write_bcr(ioaddr, 2, val & ~2);
29174- val = lp->a.read_bcr(ioaddr, 32);
29175- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
29176+ val = lp->a->read_bcr(ioaddr, 2);
29177+ lp->a->write_bcr(ioaddr, 2, val & ~2);
29178+ val = lp->a->read_bcr(ioaddr, 32);
29179+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
29180
29181 if (!(lp->options & PCNET32_PORT_ASEL)) {
29182 /* setup ecmd */
29183@@ -2118,7 +2118,7 @@ static int pcnet32_open(struct net_devic
29184 ethtool_cmd_speed_set(&ecmd,
29185 (lp->options & PCNET32_PORT_100) ?
29186 SPEED_100 : SPEED_10);
29187- bcr9 = lp->a.read_bcr(ioaddr, 9);
29188+ bcr9 = lp->a->read_bcr(ioaddr, 9);
29189
29190 if (lp->options & PCNET32_PORT_FD) {
29191 ecmd.duplex = DUPLEX_FULL;
29192@@ -2127,7 +2127,7 @@ static int pcnet32_open(struct net_devic
29193 ecmd.duplex = DUPLEX_HALF;
29194 bcr9 |= ~(1 << 0);
29195 }
29196- lp->a.write_bcr(ioaddr, 9, bcr9);
29197+ lp->a->write_bcr(ioaddr, 9, bcr9);
29198 }
29199
29200 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29201@@ -2158,9 +2158,9 @@ static int pcnet32_open(struct net_devic
29202
29203 #ifdef DO_DXSUFLO
29204 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
29205- val = lp->a.read_csr(ioaddr, CSR3);
29206+ val = lp->a->read_csr(ioaddr, CSR3);
29207 val |= 0x40;
29208- lp->a.write_csr(ioaddr, CSR3, val);
29209+ lp->a->write_csr(ioaddr, CSR3, val);
29210 }
29211 #endif
29212
29213@@ -2176,11 +2176,11 @@ static int pcnet32_open(struct net_devic
29214 napi_enable(&lp->napi);
29215
29216 /* Re-initialize the PCNET32, and start it when done. */
29217- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29218- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29219+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29220+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29221
29222- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29223- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29224+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29225+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29226
29227 netif_start_queue(dev);
29228
29229@@ -2192,19 +2192,19 @@ static int pcnet32_open(struct net_devic
29230
29231 i = 0;
29232 while (i++ < 100)
29233- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29234+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29235 break;
29236 /*
29237 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
29238 * reports that doing so triggers a bug in the '974.
29239 */
29240- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
29241+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
29242
29243 netif_printk(lp, ifup, KERN_DEBUG, dev,
29244 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
29245 i,
29246 (u32) (lp->init_dma_addr),
29247- lp->a.read_csr(ioaddr, CSR0));
29248+ lp->a->read_csr(ioaddr, CSR0));
29249
29250 spin_unlock_irqrestore(&lp->lock, flags);
29251
29252@@ -2218,7 +2218,7 @@ err_free_ring:
29253 * Switch back to 16bit mode to avoid problems with dumb
29254 * DOS packet driver after a warm reboot
29255 */
29256- lp->a.write_bcr(ioaddr, 20, 4);
29257+ lp->a->write_bcr(ioaddr, 20, 4);
29258
29259 err_free_irq:
29260 spin_unlock_irqrestore(&lp->lock, flags);
29261@@ -2323,7 +2323,7 @@ static void pcnet32_restart(struct net_d
29262
29263 /* wait for stop */
29264 for (i = 0; i < 100; i++)
29265- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
29266+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
29267 break;
29268
29269 if (i >= 100)
29270@@ -2335,13 +2335,13 @@ static void pcnet32_restart(struct net_d
29271 return;
29272
29273 /* ReInit Ring */
29274- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29275+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29276 i = 0;
29277 while (i++ < 1000)
29278- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29279+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29280 break;
29281
29282- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
29283+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
29284 }
29285
29286 static void pcnet32_tx_timeout(struct net_device *dev)
29287@@ -2353,8 +2353,8 @@ static void pcnet32_tx_timeout(struct ne
29288 /* Transmitter timeout, serious problems. */
29289 if (pcnet32_debug & NETIF_MSG_DRV)
29290 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
29291- dev->name, lp->a.read_csr(ioaddr, CSR0));
29292- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29293+ dev->name, lp->a->read_csr(ioaddr, CSR0));
29294+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29295 dev->stats.tx_errors++;
29296 if (netif_msg_tx_err(lp)) {
29297 int i;
29298@@ -2397,7 +2397,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29299
29300 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
29301 "%s() called, csr0 %4.4x\n",
29302- __func__, lp->a.read_csr(ioaddr, CSR0));
29303+ __func__, lp->a->read_csr(ioaddr, CSR0));
29304
29305 /* Default status -- will not enable Successful-TxDone
29306 * interrupt when that option is available to us.
29307@@ -2427,7 +2427,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29308 dev->stats.tx_bytes += skb->len;
29309
29310 /* Trigger an immediate send poll. */
29311- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29312+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29313
29314 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
29315 lp->tx_full = 1;
29316@@ -2452,16 +2452,16 @@ pcnet32_interrupt(int irq, void *dev_id)
29317
29318 spin_lock(&lp->lock);
29319
29320- csr0 = lp->a.read_csr(ioaddr, CSR0);
29321+ csr0 = lp->a->read_csr(ioaddr, CSR0);
29322 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
29323 if (csr0 == 0xffff)
29324 break; /* PCMCIA remove happened */
29325 /* Acknowledge all of the current interrupt sources ASAP. */
29326- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29327+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29328
29329 netif_printk(lp, intr, KERN_DEBUG, dev,
29330 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
29331- csr0, lp->a.read_csr(ioaddr, CSR0));
29332+ csr0, lp->a->read_csr(ioaddr, CSR0));
29333
29334 /* Log misc errors. */
29335 if (csr0 & 0x4000)
29336@@ -2488,19 +2488,19 @@ pcnet32_interrupt(int irq, void *dev_id)
29337 if (napi_schedule_prep(&lp->napi)) {
29338 u16 val;
29339 /* set interrupt masks */
29340- val = lp->a.read_csr(ioaddr, CSR3);
29341+ val = lp->a->read_csr(ioaddr, CSR3);
29342 val |= 0x5f00;
29343- lp->a.write_csr(ioaddr, CSR3, val);
29344+ lp->a->write_csr(ioaddr, CSR3, val);
29345
29346 __napi_schedule(&lp->napi);
29347 break;
29348 }
29349- csr0 = lp->a.read_csr(ioaddr, CSR0);
29350+ csr0 = lp->a->read_csr(ioaddr, CSR0);
29351 }
29352
29353 netif_printk(lp, intr, KERN_DEBUG, dev,
29354 "exiting interrupt, csr0=%#4.4x\n",
29355- lp->a.read_csr(ioaddr, CSR0));
29356+ lp->a->read_csr(ioaddr, CSR0));
29357
29358 spin_unlock(&lp->lock);
29359
29360@@ -2520,20 +2520,20 @@ static int pcnet32_close(struct net_devi
29361
29362 spin_lock_irqsave(&lp->lock, flags);
29363
29364- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29365+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29366
29367 netif_printk(lp, ifdown, KERN_DEBUG, dev,
29368 "Shutting down ethercard, status was %2.2x\n",
29369- lp->a.read_csr(ioaddr, CSR0));
29370+ lp->a->read_csr(ioaddr, CSR0));
29371
29372 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
29373- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29374+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29375
29376 /*
29377 * Switch back to 16bit mode to avoid problems with dumb
29378 * DOS packet driver after a warm reboot
29379 */
29380- lp->a.write_bcr(ioaddr, 20, 4);
29381+ lp->a->write_bcr(ioaddr, 20, 4);
29382
29383 spin_unlock_irqrestore(&lp->lock, flags);
29384
29385@@ -2556,7 +2556,7 @@ static struct net_device_stats *pcnet32_
29386 unsigned long flags;
29387
29388 spin_lock_irqsave(&lp->lock, flags);
29389- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29390+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29391 spin_unlock_irqrestore(&lp->lock, flags);
29392
29393 return &dev->stats;
29394@@ -2578,10 +2578,10 @@ static void pcnet32_load_multicast(struc
29395 if (dev->flags & IFF_ALLMULTI) {
29396 ib->filter[0] = cpu_to_le32(~0U);
29397 ib->filter[1] = cpu_to_le32(~0U);
29398- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29399- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29400- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29401- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29402+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29403+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29404+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29405+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29406 return;
29407 }
29408 /* clear the multicast filter */
29409@@ -2601,7 +2601,7 @@ static void pcnet32_load_multicast(struc
29410 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
29411 }
29412 for (i = 0; i < 4; i++)
29413- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
29414+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
29415 le16_to_cpu(mcast_table[i]));
29416 }
29417
29418@@ -2616,28 +2616,28 @@ static void pcnet32_set_multicast_list(s
29419
29420 spin_lock_irqsave(&lp->lock, flags);
29421 suspended = pcnet32_suspend(dev, &flags, 0);
29422- csr15 = lp->a.read_csr(ioaddr, CSR15);
29423+ csr15 = lp->a->read_csr(ioaddr, CSR15);
29424 if (dev->flags & IFF_PROMISC) {
29425 /* Log any net taps. */
29426 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
29427 lp->init_block->mode =
29428 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
29429 7);
29430- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
29431+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
29432 } else {
29433 lp->init_block->mode =
29434 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
29435- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29436+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29437 pcnet32_load_multicast(dev);
29438 }
29439
29440 if (suspended) {
29441 int csr5;
29442 /* clear SUSPEND (SPND) - CSR5 bit 0 */
29443- csr5 = lp->a.read_csr(ioaddr, CSR5);
29444- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29445+ csr5 = lp->a->read_csr(ioaddr, CSR5);
29446+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29447 } else {
29448- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29449+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29450 pcnet32_restart(dev, CSR0_NORMAL);
29451 netif_wake_queue(dev);
29452 }
29453@@ -2655,8 +2655,8 @@ static int mdio_read(struct net_device *
29454 if (!lp->mii)
29455 return 0;
29456
29457- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29458- val_out = lp->a.read_bcr(ioaddr, 34);
29459+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29460+ val_out = lp->a->read_bcr(ioaddr, 34);
29461
29462 return val_out;
29463 }
29464@@ -2670,8 +2670,8 @@ static void mdio_write(struct net_device
29465 if (!lp->mii)
29466 return;
29467
29468- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29469- lp->a.write_bcr(ioaddr, 34, val);
29470+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29471+ lp->a->write_bcr(ioaddr, 34, val);
29472 }
29473
29474 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
29475@@ -2748,7 +2748,7 @@ static void pcnet32_check_media(struct n
29476 curr_link = mii_link_ok(&lp->mii_if);
29477 } else {
29478 ulong ioaddr = dev->base_addr; /* card base I/O address */
29479- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
29480+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
29481 }
29482 if (!curr_link) {
29483 if (prev_link || verbose) {
29484@@ -2771,13 +2771,13 @@ static void pcnet32_check_media(struct n
29485 (ecmd.duplex == DUPLEX_FULL)
29486 ? "full" : "half");
29487 }
29488- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
29489+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
29490 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
29491 if (lp->mii_if.full_duplex)
29492 bcr9 |= (1 << 0);
29493 else
29494 bcr9 &= ~(1 << 0);
29495- lp->a.write_bcr(dev->base_addr, 9, bcr9);
29496+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
29497 }
29498 } else {
29499 netif_info(lp, link, dev, "link up\n");
29500diff -urNp linux-3.0.4/drivers/net/ppp_generic.c linux-3.0.4/drivers/net/ppp_generic.c
29501--- linux-3.0.4/drivers/net/ppp_generic.c 2011-07-21 22:17:23.000000000 -0400
29502+++ linux-3.0.4/drivers/net/ppp_generic.c 2011-08-23 21:47:55.000000000 -0400
29503@@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, st
29504 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
29505 struct ppp_stats stats;
29506 struct ppp_comp_stats cstats;
29507- char *vers;
29508
29509 switch (cmd) {
29510 case SIOCGPPPSTATS:
29511@@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, st
29512 break;
29513
29514 case SIOCGPPPVER:
29515- vers = PPP_VERSION;
29516- if (copy_to_user(addr, vers, strlen(vers) + 1))
29517+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
29518 break;
29519 err = 0;
29520 break;
29521diff -urNp linux-3.0.4/drivers/net/r8169.c linux-3.0.4/drivers/net/r8169.c
29522--- linux-3.0.4/drivers/net/r8169.c 2011-09-02 18:11:21.000000000 -0400
29523+++ linux-3.0.4/drivers/net/r8169.c 2011-08-23 21:47:55.000000000 -0400
29524@@ -645,12 +645,12 @@ struct rtl8169_private {
29525 struct mdio_ops {
29526 void (*write)(void __iomem *, int, int);
29527 int (*read)(void __iomem *, int);
29528- } mdio_ops;
29529+ } __no_const mdio_ops;
29530
29531 struct pll_power_ops {
29532 void (*down)(struct rtl8169_private *);
29533 void (*up)(struct rtl8169_private *);
29534- } pll_power_ops;
29535+ } __no_const pll_power_ops;
29536
29537 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
29538 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
29539diff -urNp linux-3.0.4/drivers/net/tg3.h linux-3.0.4/drivers/net/tg3.h
29540--- linux-3.0.4/drivers/net/tg3.h 2011-07-21 22:17:23.000000000 -0400
29541+++ linux-3.0.4/drivers/net/tg3.h 2011-08-23 21:47:55.000000000 -0400
29542@@ -134,6 +134,7 @@
29543 #define CHIPREV_ID_5750_A0 0x4000
29544 #define CHIPREV_ID_5750_A1 0x4001
29545 #define CHIPREV_ID_5750_A3 0x4003
29546+#define CHIPREV_ID_5750_C1 0x4201
29547 #define CHIPREV_ID_5750_C2 0x4202
29548 #define CHIPREV_ID_5752_A0_HW 0x5000
29549 #define CHIPREV_ID_5752_A0 0x6000
29550diff -urNp linux-3.0.4/drivers/net/tokenring/abyss.c linux-3.0.4/drivers/net/tokenring/abyss.c
29551--- linux-3.0.4/drivers/net/tokenring/abyss.c 2011-07-21 22:17:23.000000000 -0400
29552+++ linux-3.0.4/drivers/net/tokenring/abyss.c 2011-08-23 21:47:55.000000000 -0400
29553@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
29554
29555 static int __init abyss_init (void)
29556 {
29557- abyss_netdev_ops = tms380tr_netdev_ops;
29558+ pax_open_kernel();
29559+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29560
29561- abyss_netdev_ops.ndo_open = abyss_open;
29562- abyss_netdev_ops.ndo_stop = abyss_close;
29563+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
29564+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
29565+ pax_close_kernel();
29566
29567 return pci_register_driver(&abyss_driver);
29568 }
29569diff -urNp linux-3.0.4/drivers/net/tokenring/madgemc.c linux-3.0.4/drivers/net/tokenring/madgemc.c
29570--- linux-3.0.4/drivers/net/tokenring/madgemc.c 2011-07-21 22:17:23.000000000 -0400
29571+++ linux-3.0.4/drivers/net/tokenring/madgemc.c 2011-08-23 21:47:55.000000000 -0400
29572@@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver
29573
29574 static int __init madgemc_init (void)
29575 {
29576- madgemc_netdev_ops = tms380tr_netdev_ops;
29577- madgemc_netdev_ops.ndo_open = madgemc_open;
29578- madgemc_netdev_ops.ndo_stop = madgemc_close;
29579+ pax_open_kernel();
29580+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29581+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
29582+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
29583+ pax_close_kernel();
29584
29585 return mca_register_driver (&madgemc_driver);
29586 }
29587diff -urNp linux-3.0.4/drivers/net/tokenring/proteon.c linux-3.0.4/drivers/net/tokenring/proteon.c
29588--- linux-3.0.4/drivers/net/tokenring/proteon.c 2011-07-21 22:17:23.000000000 -0400
29589+++ linux-3.0.4/drivers/net/tokenring/proteon.c 2011-08-23 21:47:55.000000000 -0400
29590@@ -353,9 +353,11 @@ static int __init proteon_init(void)
29591 struct platform_device *pdev;
29592 int i, num = 0, err = 0;
29593
29594- proteon_netdev_ops = tms380tr_netdev_ops;
29595- proteon_netdev_ops.ndo_open = proteon_open;
29596- proteon_netdev_ops.ndo_stop = tms380tr_close;
29597+ pax_open_kernel();
29598+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29599+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
29600+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
29601+ pax_close_kernel();
29602
29603 err = platform_driver_register(&proteon_driver);
29604 if (err)
29605diff -urNp linux-3.0.4/drivers/net/tokenring/skisa.c linux-3.0.4/drivers/net/tokenring/skisa.c
29606--- linux-3.0.4/drivers/net/tokenring/skisa.c 2011-07-21 22:17:23.000000000 -0400
29607+++ linux-3.0.4/drivers/net/tokenring/skisa.c 2011-08-23 21:47:55.000000000 -0400
29608@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
29609 struct platform_device *pdev;
29610 int i, num = 0, err = 0;
29611
29612- sk_isa_netdev_ops = tms380tr_netdev_ops;
29613- sk_isa_netdev_ops.ndo_open = sk_isa_open;
29614- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
29615+ pax_open_kernel();
29616+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29617+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
29618+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
29619+ pax_close_kernel();
29620
29621 err = platform_driver_register(&sk_isa_driver);
29622 if (err)
29623diff -urNp linux-3.0.4/drivers/net/tulip/de2104x.c linux-3.0.4/drivers/net/tulip/de2104x.c
29624--- linux-3.0.4/drivers/net/tulip/de2104x.c 2011-07-21 22:17:23.000000000 -0400
29625+++ linux-3.0.4/drivers/net/tulip/de2104x.c 2011-08-23 21:48:14.000000000 -0400
29626@@ -1794,6 +1794,8 @@ static void __devinit de21041_get_srom_i
29627 struct de_srom_info_leaf *il;
29628 void *bufp;
29629
29630+ pax_track_stack();
29631+
29632 /* download entire eeprom */
29633 for (i = 0; i < DE_EEPROM_WORDS; i++)
29634 ((__le16 *)ee_data)[i] =
29635diff -urNp linux-3.0.4/drivers/net/tulip/de4x5.c linux-3.0.4/drivers/net/tulip/de4x5.c
29636--- linux-3.0.4/drivers/net/tulip/de4x5.c 2011-07-21 22:17:23.000000000 -0400
29637+++ linux-3.0.4/drivers/net/tulip/de4x5.c 2011-08-23 21:47:55.000000000 -0400
29638@@ -5401,7 +5401,7 @@ de4x5_ioctl(struct net_device *dev, stru
29639 for (i=0; i<ETH_ALEN; i++) {
29640 tmp.addr[i] = dev->dev_addr[i];
29641 }
29642- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
29643+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
29644 break;
29645
29646 case DE4X5_SET_HWADDR: /* Set the hardware address */
29647@@ -5441,7 +5441,7 @@ de4x5_ioctl(struct net_device *dev, stru
29648 spin_lock_irqsave(&lp->lock, flags);
29649 memcpy(&statbuf, &lp->pktStats, ioc->len);
29650 spin_unlock_irqrestore(&lp->lock, flags);
29651- if (copy_to_user(ioc->data, &statbuf, ioc->len))
29652+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
29653 return -EFAULT;
29654 break;
29655 }
29656diff -urNp linux-3.0.4/drivers/net/usb/hso.c linux-3.0.4/drivers/net/usb/hso.c
29657--- linux-3.0.4/drivers/net/usb/hso.c 2011-07-21 22:17:23.000000000 -0400
29658+++ linux-3.0.4/drivers/net/usb/hso.c 2011-08-23 21:47:55.000000000 -0400
29659@@ -71,7 +71,7 @@
29660 #include <asm/byteorder.h>
29661 #include <linux/serial_core.h>
29662 #include <linux/serial.h>
29663-
29664+#include <asm/local.h>
29665
29666 #define MOD_AUTHOR "Option Wireless"
29667 #define MOD_DESCRIPTION "USB High Speed Option driver"
29668@@ -257,7 +257,7 @@ struct hso_serial {
29669
29670 /* from usb_serial_port */
29671 struct tty_struct *tty;
29672- int open_count;
29673+ local_t open_count;
29674 spinlock_t serial_lock;
29675
29676 int (*write_data) (struct hso_serial *serial);
29677@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_
29678 struct urb *urb;
29679
29680 urb = serial->rx_urb[0];
29681- if (serial->open_count > 0) {
29682+ if (local_read(&serial->open_count) > 0) {
29683 count = put_rxbuf_data(urb, serial);
29684 if (count == -1)
29685 return;
29686@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_cal
29687 DUMP1(urb->transfer_buffer, urb->actual_length);
29688
29689 /* Anyone listening? */
29690- if (serial->open_count == 0)
29691+ if (local_read(&serial->open_count) == 0)
29692 return;
29693
29694 if (status == 0) {
29695@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
29696 spin_unlock_irq(&serial->serial_lock);
29697
29698 /* check for port already opened, if not set the termios */
29699- serial->open_count++;
29700- if (serial->open_count == 1) {
29701+ if (local_inc_return(&serial->open_count) == 1) {
29702 serial->rx_state = RX_IDLE;
29703 /* Force default termio settings */
29704 _hso_serial_set_termios(tty, NULL);
29705@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_st
29706 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
29707 if (result) {
29708 hso_stop_serial_device(serial->parent);
29709- serial->open_count--;
29710+ local_dec(&serial->open_count);
29711 kref_put(&serial->parent->ref, hso_serial_ref_free);
29712 }
29713 } else {
29714@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_
29715
29716 /* reset the rts and dtr */
29717 /* do the actual close */
29718- serial->open_count--;
29719+ local_dec(&serial->open_count);
29720
29721- if (serial->open_count <= 0) {
29722- serial->open_count = 0;
29723+ if (local_read(&serial->open_count) <= 0) {
29724+ local_set(&serial->open_count, 0);
29725 spin_lock_irq(&serial->serial_lock);
29726 if (serial->tty == tty) {
29727 serial->tty->driver_data = NULL;
29728@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struc
29729
29730 /* the actual setup */
29731 spin_lock_irqsave(&serial->serial_lock, flags);
29732- if (serial->open_count)
29733+ if (local_read(&serial->open_count))
29734 _hso_serial_set_termios(tty, old);
29735 else
29736 tty->termios = old;
29737@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *ur
29738 D1("Pending read interrupt on port %d\n", i);
29739 spin_lock(&serial->serial_lock);
29740 if (serial->rx_state == RX_IDLE &&
29741- serial->open_count > 0) {
29742+ local_read(&serial->open_count) > 0) {
29743 /* Setup and send a ctrl req read on
29744 * port i */
29745 if (!serial->rx_urb_filled[0]) {
29746@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interfa
29747 /* Start all serial ports */
29748 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
29749 if (serial_table[i] && (serial_table[i]->interface == iface)) {
29750- if (dev2ser(serial_table[i])->open_count) {
29751+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
29752 result =
29753 hso_start_serial_device(serial_table[i], GFP_NOIO);
29754 hso_kick_transmit(dev2ser(serial_table[i]));
29755diff -urNp linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c
29756--- linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-07-21 22:17:23.000000000 -0400
29757+++ linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-08-23 21:47:55.000000000 -0400
29758@@ -594,8 +594,7 @@ vmxnet3_set_rss_indir(struct net_device
29759 * Return with error code if any of the queue indices
29760 * is out of range
29761 */
29762- if (p->ring_index[i] < 0 ||
29763- p->ring_index[i] >= adapter->num_rx_queues)
29764+ if (p->ring_index[i] >= adapter->num_rx_queues)
29765 return -EINVAL;
29766 }
29767
29768diff -urNp linux-3.0.4/drivers/net/vxge/vxge-config.h linux-3.0.4/drivers/net/vxge/vxge-config.h
29769--- linux-3.0.4/drivers/net/vxge/vxge-config.h 2011-07-21 22:17:23.000000000 -0400
29770+++ linux-3.0.4/drivers/net/vxge/vxge-config.h 2011-08-23 21:47:55.000000000 -0400
29771@@ -512,7 +512,7 @@ struct vxge_hw_uld_cbs {
29772 void (*link_down)(struct __vxge_hw_device *devh);
29773 void (*crit_err)(struct __vxge_hw_device *devh,
29774 enum vxge_hw_event type, u64 ext_data);
29775-};
29776+} __no_const;
29777
29778 /*
29779 * struct __vxge_hw_blockpool_entry - Block private data structure
29780diff -urNp linux-3.0.4/drivers/net/vxge/vxge-main.c linux-3.0.4/drivers/net/vxge/vxge-main.c
29781--- linux-3.0.4/drivers/net/vxge/vxge-main.c 2011-07-21 22:17:23.000000000 -0400
29782+++ linux-3.0.4/drivers/net/vxge/vxge-main.c 2011-08-23 21:48:14.000000000 -0400
29783@@ -98,6 +98,8 @@ static inline void VXGE_COMPLETE_VPATH_T
29784 struct sk_buff *completed[NR_SKB_COMPLETED];
29785 int more;
29786
29787+ pax_track_stack();
29788+
29789 do {
29790 more = 0;
29791 skb_ptr = completed;
29792@@ -1920,6 +1922,8 @@ static enum vxge_hw_status vxge_rth_conf
29793 u8 mtable[256] = {0}; /* CPU to vpath mapping */
29794 int index;
29795
29796+ pax_track_stack();
29797+
29798 /*
29799 * Filling
29800 * - itable with bucket numbers
29801diff -urNp linux-3.0.4/drivers/net/vxge/vxge-traffic.h linux-3.0.4/drivers/net/vxge/vxge-traffic.h
29802--- linux-3.0.4/drivers/net/vxge/vxge-traffic.h 2011-07-21 22:17:23.000000000 -0400
29803+++ linux-3.0.4/drivers/net/vxge/vxge-traffic.h 2011-08-23 21:47:55.000000000 -0400
29804@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
29805 struct vxge_hw_mempool_dma *dma_object,
29806 u32 index,
29807 u32 is_last);
29808-};
29809+} __no_const;
29810
29811 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
29812 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
29813diff -urNp linux-3.0.4/drivers/net/wan/cycx_x25.c linux-3.0.4/drivers/net/wan/cycx_x25.c
29814--- linux-3.0.4/drivers/net/wan/cycx_x25.c 2011-07-21 22:17:23.000000000 -0400
29815+++ linux-3.0.4/drivers/net/wan/cycx_x25.c 2011-08-23 21:48:14.000000000 -0400
29816@@ -1018,6 +1018,8 @@ static void hex_dump(char *msg, unsigned
29817 unsigned char hex[1024],
29818 * phex = hex;
29819
29820+ pax_track_stack();
29821+
29822 if (len >= (sizeof(hex) / 2))
29823 len = (sizeof(hex) / 2) - 1;
29824
29825diff -urNp linux-3.0.4/drivers/net/wan/hdlc_x25.c linux-3.0.4/drivers/net/wan/hdlc_x25.c
29826--- linux-3.0.4/drivers/net/wan/hdlc_x25.c 2011-07-21 22:17:23.000000000 -0400
29827+++ linux-3.0.4/drivers/net/wan/hdlc_x25.c 2011-08-23 21:47:55.000000000 -0400
29828@@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
29829
29830 static int x25_open(struct net_device *dev)
29831 {
29832- struct lapb_register_struct cb;
29833+ static struct lapb_register_struct cb = {
29834+ .connect_confirmation = x25_connected,
29835+ .connect_indication = x25_connected,
29836+ .disconnect_confirmation = x25_disconnected,
29837+ .disconnect_indication = x25_disconnected,
29838+ .data_indication = x25_data_indication,
29839+ .data_transmit = x25_data_transmit
29840+ };
29841 int result;
29842
29843- cb.connect_confirmation = x25_connected;
29844- cb.connect_indication = x25_connected;
29845- cb.disconnect_confirmation = x25_disconnected;
29846- cb.disconnect_indication = x25_disconnected;
29847- cb.data_indication = x25_data_indication;
29848- cb.data_transmit = x25_data_transmit;
29849-
29850 result = lapb_register(dev, &cb);
29851 if (result != LAPB_OK)
29852 return result;
29853diff -urNp linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c
29854--- linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c 2011-07-21 22:17:23.000000000 -0400
29855+++ linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c 2011-08-23 21:48:14.000000000 -0400
29856@@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
29857 int do_autopm = 1;
29858 DECLARE_COMPLETION_ONSTACK(notif_completion);
29859
29860+ pax_track_stack();
29861+
29862 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
29863 i2400m, ack, ack_size);
29864 BUG_ON(_ack == i2400m->bm_ack_buf);
29865diff -urNp linux-3.0.4/drivers/net/wireless/airo.c linux-3.0.4/drivers/net/wireless/airo.c
29866--- linux-3.0.4/drivers/net/wireless/airo.c 2011-09-02 18:11:21.000000000 -0400
29867+++ linux-3.0.4/drivers/net/wireless/airo.c 2011-08-23 21:48:14.000000000 -0400
29868@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
29869 BSSListElement * loop_net;
29870 BSSListElement * tmp_net;
29871
29872+ pax_track_stack();
29873+
29874 /* Blow away current list of scan results */
29875 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
29876 list_move_tail (&loop_net->list, &ai->network_free_list);
29877@@ -3794,6 +3796,8 @@ static u16 setup_card(struct airo_info *
29878 WepKeyRid wkr;
29879 int rc;
29880
29881+ pax_track_stack();
29882+
29883 memset( &mySsid, 0, sizeof( mySsid ) );
29884 kfree (ai->flash);
29885 ai->flash = NULL;
29886@@ -4753,6 +4757,8 @@ static int proc_stats_rid_open( struct i
29887 __le32 *vals = stats.vals;
29888 int len;
29889
29890+ pax_track_stack();
29891+
29892 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
29893 return -ENOMEM;
29894 data = file->private_data;
29895@@ -5476,6 +5482,8 @@ static int proc_BSSList_open( struct ino
29896 /* If doLoseSync is not 1, we won't do a Lose Sync */
29897 int doLoseSync = -1;
29898
29899+ pax_track_stack();
29900+
29901 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
29902 return -ENOMEM;
29903 data = file->private_data;
29904@@ -7181,6 +7189,8 @@ static int airo_get_aplist(struct net_de
29905 int i;
29906 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
29907
29908+ pax_track_stack();
29909+
29910 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
29911 if (!qual)
29912 return -ENOMEM;
29913@@ -7741,6 +7751,8 @@ static void airo_read_wireless_stats(str
29914 CapabilityRid cap_rid;
29915 __le32 *vals = stats_rid.vals;
29916
29917+ pax_track_stack();
29918+
29919 /* Get stats out of the card */
29920 clear_bit(JOB_WSTATS, &local->jobs);
29921 if (local->power.event) {
29922diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c
29923--- linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c 2011-07-21 22:17:23.000000000 -0400
29924+++ linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c 2011-08-23 21:48:14.000000000 -0400
29925@@ -204,6 +204,8 @@ static ssize_t read_file_beacon(struct f
29926 unsigned int v;
29927 u64 tsf;
29928
29929+ pax_track_stack();
29930+
29931 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
29932 len += snprintf(buf+len, sizeof(buf)-len,
29933 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
29934@@ -323,6 +325,8 @@ static ssize_t read_file_debug(struct fi
29935 unsigned int len = 0;
29936 unsigned int i;
29937
29938+ pax_track_stack();
29939+
29940 len += snprintf(buf+len, sizeof(buf)-len,
29941 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
29942
29943@@ -384,6 +388,8 @@ static ssize_t read_file_antenna(struct
29944 unsigned int i;
29945 unsigned int v;
29946
29947+ pax_track_stack();
29948+
29949 len += snprintf(buf+len, sizeof(buf)-len, "antenna mode\t%d\n",
29950 sc->ah->ah_ant_mode);
29951 len += snprintf(buf+len, sizeof(buf)-len, "default antenna\t%d\n",
29952@@ -494,6 +500,8 @@ static ssize_t read_file_misc(struct fil
29953 unsigned int len = 0;
29954 u32 filt = ath5k_hw_get_rx_filter(sc->ah);
29955
29956+ pax_track_stack();
29957+
29958 len += snprintf(buf+len, sizeof(buf)-len, "bssid-mask: %pM\n",
29959 sc->bssidmask);
29960 len += snprintf(buf+len, sizeof(buf)-len, "filter-flags: 0x%x ",
29961@@ -550,6 +558,8 @@ static ssize_t read_file_frameerrors(str
29962 unsigned int len = 0;
29963 int i;
29964
29965+ pax_track_stack();
29966+
29967 len += snprintf(buf+len, sizeof(buf)-len,
29968 "RX\n---------------------\n");
29969 len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%u\t(%u%%)\n",
29970@@ -667,6 +677,8 @@ static ssize_t read_file_ani(struct file
29971 char buf[700];
29972 unsigned int len = 0;
29973
29974+ pax_track_stack();
29975+
29976 len += snprintf(buf+len, sizeof(buf)-len,
29977 "HW has PHY error counters:\t%s\n",
29978 sc->ah->ah_capabilities.cap_has_phyerr_counters ?
29979@@ -827,6 +839,8 @@ static ssize_t read_file_queue(struct fi
29980 struct ath5k_buf *bf, *bf0;
29981 int i, n;
29982
29983+ pax_track_stack();
29984+
29985 len += snprintf(buf+len, sizeof(buf)-len,
29986 "available txbuffers: %d\n", sc->txbuf_len);
29987
29988diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c
29989--- linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-07-21 22:17:23.000000000 -0400
29990+++ linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-08-23 21:48:14.000000000 -0400
29991@@ -757,6 +757,8 @@ static void ar9003_hw_tx_iq_cal_post_pro
29992 int i, im, j;
29993 int nmeasurement;
29994
29995+ pax_track_stack();
29996+
29997 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
29998 if (ah->txchainmask & (1 << i))
29999 num_chains++;
30000diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
30001--- linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-07-21 22:17:23.000000000 -0400
30002+++ linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-08-23 21:48:14.000000000 -0400
30003@@ -356,6 +356,8 @@ static bool create_pa_curve(u32 *data_L,
30004 int theta_low_bin = 0;
30005 int i;
30006
30007+ pax_track_stack();
30008+
30009 /* disregard any bin that contains <= 16 samples */
30010 thresh_accum_cnt = 16;
30011 scale_factor = 5;
30012diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c
30013--- linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c 2011-07-21 22:17:23.000000000 -0400
30014+++ linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c 2011-08-23 21:48:14.000000000 -0400
30015@@ -337,6 +337,8 @@ static ssize_t read_file_interrupt(struc
30016 char buf[512];
30017 unsigned int len = 0;
30018
30019+ pax_track_stack();
30020+
30021 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
30022 len += snprintf(buf + len, sizeof(buf) - len,
30023 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
30024@@ -427,6 +429,8 @@ static ssize_t read_file_wiphy(struct fi
30025 u8 addr[ETH_ALEN];
30026 u32 tmp;
30027
30028+ pax_track_stack();
30029+
30030 len += snprintf(buf + len, sizeof(buf) - len,
30031 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
30032 wiphy_name(sc->hw->wiphy),
30033diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
30034--- linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-07-21 22:17:23.000000000 -0400
30035+++ linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-08-23 21:48:14.000000000 -0400
30036@@ -31,6 +31,8 @@ static ssize_t read_file_tgt_int_stats(s
30037 unsigned int len = 0;
30038 int ret = 0;
30039
30040+ pax_track_stack();
30041+
30042 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30043
30044 ath9k_htc_ps_wakeup(priv);
30045@@ -89,6 +91,8 @@ static ssize_t read_file_tgt_tx_stats(st
30046 unsigned int len = 0;
30047 int ret = 0;
30048
30049+ pax_track_stack();
30050+
30051 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30052
30053 ath9k_htc_ps_wakeup(priv);
30054@@ -159,6 +163,8 @@ static ssize_t read_file_tgt_rx_stats(st
30055 unsigned int len = 0;
30056 int ret = 0;
30057
30058+ pax_track_stack();
30059+
30060 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30061
30062 ath9k_htc_ps_wakeup(priv);
30063@@ -203,6 +209,8 @@ static ssize_t read_file_xmit(struct fil
30064 char buf[512];
30065 unsigned int len = 0;
30066
30067+ pax_track_stack();
30068+
30069 len += snprintf(buf + len, sizeof(buf) - len,
30070 "%20s : %10u\n", "Buffers queued",
30071 priv->debug.tx_stats.buf_queued);
30072@@ -376,6 +384,8 @@ static ssize_t read_file_slot(struct fil
30073 char buf[512];
30074 unsigned int len = 0;
30075
30076+ pax_track_stack();
30077+
30078 spin_lock_bh(&priv->tx.tx_lock);
30079
30080 len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
30081@@ -411,6 +421,8 @@ static ssize_t read_file_queue(struct fi
30082 char buf[512];
30083 unsigned int len = 0;
30084
30085+ pax_track_stack();
30086+
30087 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
30088 "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
30089
30090diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h
30091--- linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h 2011-09-02 18:11:21.000000000 -0400
30092+++ linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h 2011-08-23 21:47:55.000000000 -0400
30093@@ -585,7 +585,7 @@ struct ath_hw_private_ops {
30094
30095 /* ANI */
30096 void (*ani_cache_ini_regs)(struct ath_hw *ah);
30097-};
30098+} __no_const;
30099
30100 /**
30101 * struct ath_hw_ops - callbacks used by hardware code and driver code
30102@@ -637,7 +637,7 @@ struct ath_hw_ops {
30103 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
30104 struct ath_hw_antcomb_conf *antconf);
30105
30106-};
30107+} __no_const;
30108
30109 struct ath_nf_limits {
30110 s16 max;
30111@@ -650,7 +650,7 @@ struct ath_nf_limits {
30112 #define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
30113
30114 struct ath_hw {
30115- struct ath_ops reg_ops;
30116+ ath_ops_no_const reg_ops;
30117
30118 struct ieee80211_hw *hw;
30119 struct ath_common common;
30120diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath.h linux-3.0.4/drivers/net/wireless/ath/ath.h
30121--- linux-3.0.4/drivers/net/wireless/ath/ath.h 2011-07-21 22:17:23.000000000 -0400
30122+++ linux-3.0.4/drivers/net/wireless/ath/ath.h 2011-08-23 21:47:55.000000000 -0400
30123@@ -121,6 +121,7 @@ struct ath_ops {
30124 void (*write_flush) (void *);
30125 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
30126 };
30127+typedef struct ath_ops __no_const ath_ops_no_const;
30128
30129 struct ath_common;
30130 struct ath_bus_ops;
30131diff -urNp linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c
30132--- linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c 2011-07-21 22:17:23.000000000 -0400
30133+++ linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c 2011-08-23 21:48:14.000000000 -0400
30134@@ -2100,6 +2100,8 @@ static int ipw2100_set_essid(struct ipw2
30135 int err;
30136 DECLARE_SSID_BUF(ssid);
30137
30138+ pax_track_stack();
30139+
30140 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
30141
30142 if (ssid_len)
30143@@ -5449,6 +5451,8 @@ static int ipw2100_set_key(struct ipw210
30144 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
30145 int err;
30146
30147+ pax_track_stack();
30148+
30149 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
30150 idx, keylen, len);
30151
30152diff -urNp linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c
30153--- linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-07-21 22:17:23.000000000 -0400
30154+++ linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-08-23 21:48:14.000000000 -0400
30155@@ -1565,6 +1565,8 @@ static void libipw_process_probe_respons
30156 unsigned long flags;
30157 DECLARE_SSID_BUF(ssid);
30158
30159+ pax_track_stack();
30160+
30161 LIBIPW_DEBUG_SCAN("'%s' (%pM"
30162 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
30163 print_ssid(ssid, info_element->data, info_element->len),
30164diff -urNp linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c
30165--- linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-07-21 22:17:23.000000000 -0400
30166+++ linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-08-23 21:47:55.000000000 -0400
30167@@ -3962,7 +3962,9 @@ static int iwl3945_pci_probe(struct pci_
30168 */
30169 if (iwl3945_mod_params.disable_hw_scan) {
30170 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
30171- iwl3945_hw_ops.hw_scan = NULL;
30172+ pax_open_kernel();
30173+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
30174+ pax_close_kernel();
30175 }
30176
30177 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
30178diff -urNp linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
30179--- linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-07-21 22:17:23.000000000 -0400
30180+++ linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-08-23 21:48:14.000000000 -0400
30181@@ -910,6 +910,8 @@ static void rs_tx_status(void *priv_r, s
30182 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
30183 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
30184
30185+ pax_track_stack();
30186+
30187 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
30188
30189 /* Treat uninitialized rate scaling data same as non-existing. */
30190@@ -2918,6 +2920,8 @@ static void rs_fill_link_cmd(struct iwl_
30191 container_of(lq_sta, struct iwl_station_priv, lq_sta);
30192 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
30193
30194+ pax_track_stack();
30195+
30196 /* Override starting rate (index 0) if needed for debug purposes */
30197 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
30198
30199diff -urNp linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c
30200--- linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-07-21 22:17:23.000000000 -0400
30201+++ linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-08-23 21:48:14.000000000 -0400
30202@@ -548,6 +548,8 @@ static ssize_t iwl_dbgfs_status_read(str
30203 int pos = 0;
30204 const size_t bufsz = sizeof(buf);
30205
30206+ pax_track_stack();
30207+
30208 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
30209 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
30210 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
30211@@ -680,6 +682,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
30212 char buf[256 * NUM_IWL_RXON_CTX];
30213 const size_t bufsz = sizeof(buf);
30214
30215+ pax_track_stack();
30216+
30217 for_each_context(priv, ctx) {
30218 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
30219 ctx->ctxid);
30220diff -urNp linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h
30221--- linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-07-21 22:17:23.000000000 -0400
30222+++ linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-08-23 21:47:55.000000000 -0400
30223@@ -68,8 +68,8 @@ do {
30224 } while (0)
30225
30226 #else
30227-#define IWL_DEBUG(__priv, level, fmt, args...)
30228-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
30229+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
30230+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
30231 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
30232 const void *p, u32 len)
30233 {}
30234diff -urNp linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c
30235--- linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-07-21 22:17:23.000000000 -0400
30236+++ linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-08-23 21:48:14.000000000 -0400
30237@@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
30238 int buf_len = 512;
30239 size_t len = 0;
30240
30241+ pax_track_stack();
30242+
30243 if (*ppos != 0)
30244 return 0;
30245 if (count < sizeof(buf))
30246diff -urNp linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c
30247--- linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c 2011-07-21 22:17:23.000000000 -0400
30248+++ linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c 2011-08-23 21:47:55.000000000 -0400
30249@@ -1260,9 +1260,11 @@ static int __init init_mac80211_hwsim(vo
30250 return -EINVAL;
30251
30252 if (fake_hw_scan) {
30253- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30254- mac80211_hwsim_ops.sw_scan_start = NULL;
30255- mac80211_hwsim_ops.sw_scan_complete = NULL;
30256+ pax_open_kernel();
30257+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30258+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
30259+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
30260+ pax_close_kernel();
30261 }
30262
30263 spin_lock_init(&hwsim_radio_lock);
30264diff -urNp linux-3.0.4/drivers/net/wireless/rndis_wlan.c linux-3.0.4/drivers/net/wireless/rndis_wlan.c
30265--- linux-3.0.4/drivers/net/wireless/rndis_wlan.c 2011-07-21 22:17:23.000000000 -0400
30266+++ linux-3.0.4/drivers/net/wireless/rndis_wlan.c 2011-08-23 21:47:55.000000000 -0400
30267@@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbn
30268
30269 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
30270
30271- if (rts_threshold < 0 || rts_threshold > 2347)
30272+ if (rts_threshold > 2347)
30273 rts_threshold = 2347;
30274
30275 tmp = cpu_to_le32(rts_threshold);
30276diff -urNp linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
30277--- linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-07-21 22:17:23.000000000 -0400
30278+++ linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-08-23 21:48:14.000000000 -0400
30279@@ -837,6 +837,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(st
30280 u8 rfpath;
30281 u8 num_total_rfpath = rtlphy->num_total_rfpath;
30282
30283+ pax_track_stack();
30284+
30285 precommoncmdcnt = 0;
30286 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
30287 MAX_PRECMD_CNT,
30288diff -urNp linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h
30289--- linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h 2011-07-21 22:17:23.000000000 -0400
30290+++ linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h 2011-08-23 21:47:55.000000000 -0400
30291@@ -266,7 +266,7 @@ struct wl1251_if_operations {
30292 void (*reset)(struct wl1251 *wl);
30293 void (*enable_irq)(struct wl1251 *wl);
30294 void (*disable_irq)(struct wl1251 *wl);
30295-};
30296+} __no_const;
30297
30298 struct wl1251 {
30299 struct ieee80211_hw *hw;
30300diff -urNp linux-3.0.4/drivers/net/wireless/wl12xx/spi.c linux-3.0.4/drivers/net/wireless/wl12xx/spi.c
30301--- linux-3.0.4/drivers/net/wireless/wl12xx/spi.c 2011-07-21 22:17:23.000000000 -0400
30302+++ linux-3.0.4/drivers/net/wireless/wl12xx/spi.c 2011-08-23 21:48:14.000000000 -0400
30303@@ -280,6 +280,8 @@ static void wl1271_spi_raw_write(struct
30304 u32 chunk_len;
30305 int i;
30306
30307+ pax_track_stack();
30308+
30309 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
30310
30311 spi_message_init(&m);
30312diff -urNp linux-3.0.4/drivers/oprofile/buffer_sync.c linux-3.0.4/drivers/oprofile/buffer_sync.c
30313--- linux-3.0.4/drivers/oprofile/buffer_sync.c 2011-07-21 22:17:23.000000000 -0400
30314+++ linux-3.0.4/drivers/oprofile/buffer_sync.c 2011-08-23 21:47:55.000000000 -0400
30315@@ -343,7 +343,7 @@ static void add_data(struct op_entry *en
30316 if (cookie == NO_COOKIE)
30317 offset = pc;
30318 if (cookie == INVALID_COOKIE) {
30319- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30320+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30321 offset = pc;
30322 }
30323 if (cookie != last_cookie) {
30324@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct
30325 /* add userspace sample */
30326
30327 if (!mm) {
30328- atomic_inc(&oprofile_stats.sample_lost_no_mm);
30329+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
30330 return 0;
30331 }
30332
30333 cookie = lookup_dcookie(mm, s->eip, &offset);
30334
30335 if (cookie == INVALID_COOKIE) {
30336- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30337+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30338 return 0;
30339 }
30340
30341@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
30342 /* ignore backtraces if failed to add a sample */
30343 if (state == sb_bt_start) {
30344 state = sb_bt_ignore;
30345- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
30346+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
30347 }
30348 }
30349 release_mm(mm);
30350diff -urNp linux-3.0.4/drivers/oprofile/event_buffer.c linux-3.0.4/drivers/oprofile/event_buffer.c
30351--- linux-3.0.4/drivers/oprofile/event_buffer.c 2011-07-21 22:17:23.000000000 -0400
30352+++ linux-3.0.4/drivers/oprofile/event_buffer.c 2011-08-23 21:47:55.000000000 -0400
30353@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
30354 }
30355
30356 if (buffer_pos == buffer_size) {
30357- atomic_inc(&oprofile_stats.event_lost_overflow);
30358+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
30359 return;
30360 }
30361
30362diff -urNp linux-3.0.4/drivers/oprofile/oprof.c linux-3.0.4/drivers/oprofile/oprof.c
30363--- linux-3.0.4/drivers/oprofile/oprof.c 2011-07-21 22:17:23.000000000 -0400
30364+++ linux-3.0.4/drivers/oprofile/oprof.c 2011-08-23 21:47:55.000000000 -0400
30365@@ -110,7 +110,7 @@ static void switch_worker(struct work_st
30366 if (oprofile_ops.switch_events())
30367 return;
30368
30369- atomic_inc(&oprofile_stats.multiplex_counter);
30370+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
30371 start_switch_worker();
30372 }
30373
30374diff -urNp linux-3.0.4/drivers/oprofile/oprofilefs.c linux-3.0.4/drivers/oprofile/oprofilefs.c
30375--- linux-3.0.4/drivers/oprofile/oprofilefs.c 2011-07-21 22:17:23.000000000 -0400
30376+++ linux-3.0.4/drivers/oprofile/oprofilefs.c 2011-08-23 21:47:55.000000000 -0400
30377@@ -186,7 +186,7 @@ static const struct file_operations atom
30378
30379
30380 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
30381- char const *name, atomic_t *val)
30382+ char const *name, atomic_unchecked_t *val)
30383 {
30384 return __oprofilefs_create_file(sb, root, name,
30385 &atomic_ro_fops, 0444, val);
30386diff -urNp linux-3.0.4/drivers/oprofile/oprofile_stats.c linux-3.0.4/drivers/oprofile/oprofile_stats.c
30387--- linux-3.0.4/drivers/oprofile/oprofile_stats.c 2011-07-21 22:17:23.000000000 -0400
30388+++ linux-3.0.4/drivers/oprofile/oprofile_stats.c 2011-08-23 21:47:55.000000000 -0400
30389@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
30390 cpu_buf->sample_invalid_eip = 0;
30391 }
30392
30393- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
30394- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
30395- atomic_set(&oprofile_stats.event_lost_overflow, 0);
30396- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
30397- atomic_set(&oprofile_stats.multiplex_counter, 0);
30398+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
30399+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
30400+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
30401+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
30402+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
30403 }
30404
30405
30406diff -urNp linux-3.0.4/drivers/oprofile/oprofile_stats.h linux-3.0.4/drivers/oprofile/oprofile_stats.h
30407--- linux-3.0.4/drivers/oprofile/oprofile_stats.h 2011-07-21 22:17:23.000000000 -0400
30408+++ linux-3.0.4/drivers/oprofile/oprofile_stats.h 2011-08-23 21:47:55.000000000 -0400
30409@@ -13,11 +13,11 @@
30410 #include <asm/atomic.h>
30411
30412 struct oprofile_stat_struct {
30413- atomic_t sample_lost_no_mm;
30414- atomic_t sample_lost_no_mapping;
30415- atomic_t bt_lost_no_mapping;
30416- atomic_t event_lost_overflow;
30417- atomic_t multiplex_counter;
30418+ atomic_unchecked_t sample_lost_no_mm;
30419+ atomic_unchecked_t sample_lost_no_mapping;
30420+ atomic_unchecked_t bt_lost_no_mapping;
30421+ atomic_unchecked_t event_lost_overflow;
30422+ atomic_unchecked_t multiplex_counter;
30423 };
30424
30425 extern struct oprofile_stat_struct oprofile_stats;
30426diff -urNp linux-3.0.4/drivers/parport/procfs.c linux-3.0.4/drivers/parport/procfs.c
30427--- linux-3.0.4/drivers/parport/procfs.c 2011-07-21 22:17:23.000000000 -0400
30428+++ linux-3.0.4/drivers/parport/procfs.c 2011-08-23 21:47:55.000000000 -0400
30429@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
30430
30431 *ppos += len;
30432
30433- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
30434+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
30435 }
30436
30437 #ifdef CONFIG_PARPORT_1284
30438@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
30439
30440 *ppos += len;
30441
30442- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
30443+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
30444 }
30445 #endif /* IEEE1284.3 support. */
30446
30447diff -urNp linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h
30448--- linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h 2011-07-21 22:17:23.000000000 -0400
30449+++ linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h 2011-08-23 21:47:55.000000000 -0400
30450@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
30451 int (*hardware_test) (struct slot* slot, u32 value);
30452 u8 (*get_power) (struct slot* slot);
30453 int (*set_power) (struct slot* slot, int value);
30454-};
30455+} __no_const;
30456
30457 struct cpci_hp_controller {
30458 unsigned int irq;
30459diff -urNp linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c
30460--- linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c 2011-07-21 22:17:23.000000000 -0400
30461+++ linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c 2011-08-23 21:47:55.000000000 -0400
30462@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
30463
30464 void compaq_nvram_init (void __iomem *rom_start)
30465 {
30466+
30467+#ifndef CONFIG_PAX_KERNEXEC
30468 if (rom_start) {
30469 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
30470 }
30471+#endif
30472+
30473 dbg("int15 entry = %p\n", compaq_int15_entry_point);
30474
30475 /* initialize our int15 lock */
30476diff -urNp linux-3.0.4/drivers/pci/pcie/aspm.c linux-3.0.4/drivers/pci/pcie/aspm.c
30477--- linux-3.0.4/drivers/pci/pcie/aspm.c 2011-07-21 22:17:23.000000000 -0400
30478+++ linux-3.0.4/drivers/pci/pcie/aspm.c 2011-08-23 21:47:55.000000000 -0400
30479@@ -27,9 +27,9 @@
30480 #define MODULE_PARAM_PREFIX "pcie_aspm."
30481
30482 /* Note: those are not register definitions */
30483-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
30484-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
30485-#define ASPM_STATE_L1 (4) /* L1 state */
30486+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
30487+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
30488+#define ASPM_STATE_L1 (4U) /* L1 state */
30489 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
30490 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
30491
30492diff -urNp linux-3.0.4/drivers/pci/probe.c linux-3.0.4/drivers/pci/probe.c
30493--- linux-3.0.4/drivers/pci/probe.c 2011-07-21 22:17:23.000000000 -0400
30494+++ linux-3.0.4/drivers/pci/probe.c 2011-08-23 21:47:55.000000000 -0400
30495@@ -129,7 +129,7 @@ int __pci_read_base(struct pci_dev *dev,
30496 u32 l, sz, mask;
30497 u16 orig_cmd;
30498
30499- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
30500+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
30501
30502 if (!dev->mmio_always_on) {
30503 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
30504diff -urNp linux-3.0.4/drivers/pci/proc.c linux-3.0.4/drivers/pci/proc.c
30505--- linux-3.0.4/drivers/pci/proc.c 2011-07-21 22:17:23.000000000 -0400
30506+++ linux-3.0.4/drivers/pci/proc.c 2011-08-23 21:48:14.000000000 -0400
30507@@ -476,7 +476,16 @@ static const struct file_operations proc
30508 static int __init pci_proc_init(void)
30509 {
30510 struct pci_dev *dev = NULL;
30511+
30512+#ifdef CONFIG_GRKERNSEC_PROC_ADD
30513+#ifdef CONFIG_GRKERNSEC_PROC_USER
30514+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
30515+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
30516+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
30517+#endif
30518+#else
30519 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
30520+#endif
30521 proc_create("devices", 0, proc_bus_pci_dir,
30522 &proc_bus_pci_dev_operations);
30523 proc_initialized = 1;
30524diff -urNp linux-3.0.4/drivers/pci/xen-pcifront.c linux-3.0.4/drivers/pci/xen-pcifront.c
30525--- linux-3.0.4/drivers/pci/xen-pcifront.c 2011-07-21 22:17:23.000000000 -0400
30526+++ linux-3.0.4/drivers/pci/xen-pcifront.c 2011-08-23 21:48:14.000000000 -0400
30527@@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_
30528 struct pcifront_sd *sd = bus->sysdata;
30529 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30530
30531+ pax_track_stack();
30532+
30533 if (verbose_request)
30534 dev_info(&pdev->xdev->dev,
30535 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
30536@@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci
30537 struct pcifront_sd *sd = bus->sysdata;
30538 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30539
30540+ pax_track_stack();
30541+
30542 if (verbose_request)
30543 dev_info(&pdev->xdev->dev,
30544 "write dev=%04x:%02x:%02x.%01x - "
30545@@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(stru
30546 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30547 struct msi_desc *entry;
30548
30549+ pax_track_stack();
30550+
30551 if (nvec > SH_INFO_MAX_VEC) {
30552 dev_err(&dev->dev, "too much vector for pci frontend: %x."
30553 " Increase SH_INFO_MAX_VEC.\n", nvec);
30554@@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(st
30555 struct pcifront_sd *sd = dev->bus->sysdata;
30556 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30557
30558+ pax_track_stack();
30559+
30560 err = do_pci_op(pdev, &op);
30561
30562 /* What should do for error ? */
30563@@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struc
30564 struct pcifront_sd *sd = dev->bus->sysdata;
30565 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30566
30567+ pax_track_stack();
30568+
30569 err = do_pci_op(pdev, &op);
30570 if (likely(!err)) {
30571 vector[0] = op.value;
30572diff -urNp linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c
30573--- linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c 2011-07-21 22:17:23.000000000 -0400
30574+++ linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c 2011-08-23 21:47:55.000000000 -0400
30575@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
30576 return 0;
30577 }
30578
30579-void static hotkey_mask_warn_incomplete_mask(void)
30580+static void hotkey_mask_warn_incomplete_mask(void)
30581 {
30582 /* log only what the user can fix... */
30583 const u32 wantedmask = hotkey_driver_mask &
30584diff -urNp linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c
30585--- linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c 2011-07-21 22:17:23.000000000 -0400
30586+++ linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c 2011-08-23 21:47:55.000000000 -0400
30587@@ -59,7 +59,7 @@ do { \
30588 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
30589 } while(0)
30590
30591-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
30592+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
30593 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
30594
30595 /*
30596@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
30597
30598 cpu = get_cpu();
30599 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
30600+
30601+ pax_open_kernel();
30602 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
30603+ pax_close_kernel();
30604
30605 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
30606 spin_lock_irqsave(&pnp_bios_lock, flags);
30607@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
30608 :"memory");
30609 spin_unlock_irqrestore(&pnp_bios_lock, flags);
30610
30611+ pax_open_kernel();
30612 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
30613+ pax_close_kernel();
30614+
30615 put_cpu();
30616
30617 /* If we get here and this is set then the PnP BIOS faulted on us. */
30618@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
30619 return status;
30620 }
30621
30622-void pnpbios_calls_init(union pnp_bios_install_struct *header)
30623+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
30624 {
30625 int i;
30626
30627@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i
30628 pnp_bios_callpoint.offset = header->fields.pm16offset;
30629 pnp_bios_callpoint.segment = PNP_CS16;
30630
30631+ pax_open_kernel();
30632+
30633 for_each_possible_cpu(i) {
30634 struct desc_struct *gdt = get_cpu_gdt_table(i);
30635 if (!gdt)
30636@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i
30637 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
30638 (unsigned long)__va(header->fields.pm16dseg));
30639 }
30640+
30641+ pax_close_kernel();
30642 }
30643diff -urNp linux-3.0.4/drivers/pnp/resource.c linux-3.0.4/drivers/pnp/resource.c
30644--- linux-3.0.4/drivers/pnp/resource.c 2011-07-21 22:17:23.000000000 -0400
30645+++ linux-3.0.4/drivers/pnp/resource.c 2011-08-23 21:47:55.000000000 -0400
30646@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
30647 return 1;
30648
30649 /* check if the resource is valid */
30650- if (*irq < 0 || *irq > 15)
30651+ if (*irq > 15)
30652 return 0;
30653
30654 /* check if the resource is reserved */
30655@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
30656 return 1;
30657
30658 /* check if the resource is valid */
30659- if (*dma < 0 || *dma == 4 || *dma > 7)
30660+ if (*dma == 4 || *dma > 7)
30661 return 0;
30662
30663 /* check if the resource is reserved */
30664diff -urNp linux-3.0.4/drivers/power/bq27x00_battery.c linux-3.0.4/drivers/power/bq27x00_battery.c
30665--- linux-3.0.4/drivers/power/bq27x00_battery.c 2011-07-21 22:17:23.000000000 -0400
30666+++ linux-3.0.4/drivers/power/bq27x00_battery.c 2011-08-23 21:47:55.000000000 -0400
30667@@ -67,7 +67,7 @@
30668 struct bq27x00_device_info;
30669 struct bq27x00_access_methods {
30670 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
30671-};
30672+} __no_const;
30673
30674 enum bq27x00_chip { BQ27000, BQ27500 };
30675
30676diff -urNp linux-3.0.4/drivers/regulator/max8660.c linux-3.0.4/drivers/regulator/max8660.c
30677--- linux-3.0.4/drivers/regulator/max8660.c 2011-07-21 22:17:23.000000000 -0400
30678+++ linux-3.0.4/drivers/regulator/max8660.c 2011-08-23 21:47:55.000000000 -0400
30679@@ -383,8 +383,10 @@ static int __devinit max8660_probe(struc
30680 max8660->shadow_regs[MAX8660_OVER1] = 5;
30681 } else {
30682 /* Otherwise devices can be toggled via software */
30683- max8660_dcdc_ops.enable = max8660_dcdc_enable;
30684- max8660_dcdc_ops.disable = max8660_dcdc_disable;
30685+ pax_open_kernel();
30686+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
30687+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
30688+ pax_close_kernel();
30689 }
30690
30691 /*
30692diff -urNp linux-3.0.4/drivers/regulator/mc13892-regulator.c linux-3.0.4/drivers/regulator/mc13892-regulator.c
30693--- linux-3.0.4/drivers/regulator/mc13892-regulator.c 2011-07-21 22:17:23.000000000 -0400
30694+++ linux-3.0.4/drivers/regulator/mc13892-regulator.c 2011-08-23 21:47:55.000000000 -0400
30695@@ -564,10 +564,12 @@ static int __devinit mc13892_regulator_p
30696 }
30697 mc13xxx_unlock(mc13892);
30698
30699- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
30700+ pax_open_kernel();
30701+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
30702 = mc13892_vcam_set_mode;
30703- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
30704+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
30705 = mc13892_vcam_get_mode;
30706+ pax_close_kernel();
30707 for (i = 0; i < pdata->num_regulators; i++) {
30708 init_data = &pdata->regulators[i];
30709 priv->regulators[i] = regulator_register(
30710diff -urNp linux-3.0.4/drivers/rtc/rtc-dev.c linux-3.0.4/drivers/rtc/rtc-dev.c
30711--- linux-3.0.4/drivers/rtc/rtc-dev.c 2011-07-21 22:17:23.000000000 -0400
30712+++ linux-3.0.4/drivers/rtc/rtc-dev.c 2011-08-23 21:48:14.000000000 -0400
30713@@ -14,6 +14,7 @@
30714 #include <linux/module.h>
30715 #include <linux/rtc.h>
30716 #include <linux/sched.h>
30717+#include <linux/grsecurity.h>
30718 #include "rtc-core.h"
30719
30720 static dev_t rtc_devt;
30721@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *f
30722 if (copy_from_user(&tm, uarg, sizeof(tm)))
30723 return -EFAULT;
30724
30725+ gr_log_timechange();
30726+
30727 return rtc_set_time(rtc, &tm);
30728
30729 case RTC_PIE_ON:
30730diff -urNp linux-3.0.4/drivers/scsi/aacraid/aacraid.h linux-3.0.4/drivers/scsi/aacraid/aacraid.h
30731--- linux-3.0.4/drivers/scsi/aacraid/aacraid.h 2011-07-21 22:17:23.000000000 -0400
30732+++ linux-3.0.4/drivers/scsi/aacraid/aacraid.h 2011-08-23 21:47:55.000000000 -0400
30733@@ -492,7 +492,7 @@ struct adapter_ops
30734 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
30735 /* Administrative operations */
30736 int (*adapter_comm)(struct aac_dev * dev, int comm);
30737-};
30738+} __no_const;
30739
30740 /*
30741 * Define which interrupt handler needs to be installed
30742diff -urNp linux-3.0.4/drivers/scsi/aacraid/commctrl.c linux-3.0.4/drivers/scsi/aacraid/commctrl.c
30743--- linux-3.0.4/drivers/scsi/aacraid/commctrl.c 2011-07-21 22:17:23.000000000 -0400
30744+++ linux-3.0.4/drivers/scsi/aacraid/commctrl.c 2011-08-23 21:48:14.000000000 -0400
30745@@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_d
30746 u32 actual_fibsize64, actual_fibsize = 0;
30747 int i;
30748
30749+ pax_track_stack();
30750
30751 if (dev->in_reset) {
30752 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
30753diff -urNp linux-3.0.4/drivers/scsi/bfa/bfad.c linux-3.0.4/drivers/scsi/bfa/bfad.c
30754--- linux-3.0.4/drivers/scsi/bfa/bfad.c 2011-07-21 22:17:23.000000000 -0400
30755+++ linux-3.0.4/drivers/scsi/bfa/bfad.c 2011-08-23 21:48:14.000000000 -0400
30756@@ -1032,6 +1032,8 @@ bfad_start_ops(struct bfad_s *bfad) {
30757 struct bfad_vport_s *vport, *vport_new;
30758 struct bfa_fcs_driver_info_s driver_info;
30759
30760+ pax_track_stack();
30761+
30762 /* Fill the driver_info info to fcs*/
30763 memset(&driver_info, 0, sizeof(driver_info));
30764 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
30765diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c
30766--- linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c 2011-07-21 22:17:23.000000000 -0400
30767+++ linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c 2011-08-23 21:48:14.000000000 -0400
30768@@ -1559,6 +1559,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struc
30769 u16 len, count;
30770 u16 templen;
30771
30772+ pax_track_stack();
30773+
30774 /*
30775 * get hba attributes
30776 */
30777@@ -1836,6 +1838,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(
30778 u8 count = 0;
30779 u16 templen;
30780
30781+ pax_track_stack();
30782+
30783 /*
30784 * get port attributes
30785 */
30786diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c
30787--- linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c 2011-07-21 22:17:23.000000000 -0400
30788+++ linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c 2011-08-23 21:48:14.000000000 -0400
30789@@ -1844,6 +1844,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fc
30790 struct fc_rpsc_speed_info_s speeds;
30791 struct bfa_port_attr_s pport_attr;
30792
30793+ pax_track_stack();
30794+
30795 bfa_trc(port->fcs, rx_fchs->s_id);
30796 bfa_trc(port->fcs, rx_fchs->d_id);
30797
30798diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa.h linux-3.0.4/drivers/scsi/bfa/bfa.h
30799--- linux-3.0.4/drivers/scsi/bfa/bfa.h 2011-07-21 22:17:23.000000000 -0400
30800+++ linux-3.0.4/drivers/scsi/bfa/bfa.h 2011-08-23 21:47:55.000000000 -0400
30801@@ -238,7 +238,7 @@ struct bfa_hwif_s {
30802 u32 *nvecs, u32 *maxvec);
30803 void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
30804 u32 *end);
30805-};
30806+} __no_const;
30807 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
30808
30809 struct bfa_iocfc_s {
30810diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h
30811--- linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h 2011-07-21 22:17:23.000000000 -0400
30812+++ linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h 2011-08-23 21:47:55.000000000 -0400
30813@@ -196,7 +196,7 @@ struct bfa_ioc_cbfn_s {
30814 bfa_ioc_disable_cbfn_t disable_cbfn;
30815 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
30816 bfa_ioc_reset_cbfn_t reset_cbfn;
30817-};
30818+} __no_const;
30819
30820 /*
30821 * Heartbeat failure notification queue element.
30822@@ -268,7 +268,7 @@ struct bfa_ioc_hwif_s {
30823 void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
30824 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
30825 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
30826-};
30827+} __no_const;
30828
30829 #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
30830 #define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
30831diff -urNp linux-3.0.4/drivers/scsi/BusLogic.c linux-3.0.4/drivers/scsi/BusLogic.c
30832--- linux-3.0.4/drivers/scsi/BusLogic.c 2011-07-21 22:17:23.000000000 -0400
30833+++ linux-3.0.4/drivers/scsi/BusLogic.c 2011-08-23 21:48:14.000000000 -0400
30834@@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFla
30835 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
30836 *PrototypeHostAdapter)
30837 {
30838+ pax_track_stack();
30839+
30840 /*
30841 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
30842 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
30843diff -urNp linux-3.0.4/drivers/scsi/dpt_i2o.c linux-3.0.4/drivers/scsi/dpt_i2o.c
30844--- linux-3.0.4/drivers/scsi/dpt_i2o.c 2011-07-21 22:17:23.000000000 -0400
30845+++ linux-3.0.4/drivers/scsi/dpt_i2o.c 2011-08-23 21:48:14.000000000 -0400
30846@@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* p
30847 dma_addr_t addr;
30848 ulong flags = 0;
30849
30850+ pax_track_stack();
30851+
30852 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
30853 // get user msg size in u32s
30854 if(get_user(size, &user_msg[0])){
30855@@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
30856 s32 rcode;
30857 dma_addr_t addr;
30858
30859+ pax_track_stack();
30860+
30861 memset(msg, 0 , sizeof(msg));
30862 len = scsi_bufflen(cmd);
30863 direction = 0x00000000;
30864diff -urNp linux-3.0.4/drivers/scsi/eata.c linux-3.0.4/drivers/scsi/eata.c
30865--- linux-3.0.4/drivers/scsi/eata.c 2011-07-21 22:17:23.000000000 -0400
30866+++ linux-3.0.4/drivers/scsi/eata.c 2011-08-23 21:48:14.000000000 -0400
30867@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
30868 struct hostdata *ha;
30869 char name[16];
30870
30871+ pax_track_stack();
30872+
30873 sprintf(name, "%s%d", driver_name, j);
30874
30875 if (!request_region(port_base, REGION_SIZE, driver_name)) {
30876diff -urNp linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c
30877--- linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c 2011-07-21 22:17:23.000000000 -0400
30878+++ linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c 2011-08-23 21:48:14.000000000 -0400
30879@@ -2503,6 +2503,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe
30880 } buf;
30881 int rc;
30882
30883+ pax_track_stack();
30884+
30885 fiph = (struct fip_header *)skb->data;
30886 sub = fiph->fip_subcode;
30887
30888diff -urNp linux-3.0.4/drivers/scsi/gdth.c linux-3.0.4/drivers/scsi/gdth.c
30889--- linux-3.0.4/drivers/scsi/gdth.c 2011-07-21 22:17:23.000000000 -0400
30890+++ linux-3.0.4/drivers/scsi/gdth.c 2011-08-23 21:48:14.000000000 -0400
30891@@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
30892 unsigned long flags;
30893 gdth_ha_str *ha;
30894
30895+ pax_track_stack();
30896+
30897 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
30898 return -EFAULT;
30899 ha = gdth_find_ha(ldrv.ionode);
30900@@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg
30901 gdth_ha_str *ha;
30902 int rval;
30903
30904+ pax_track_stack();
30905+
30906 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
30907 res.number >= MAX_HDRIVES)
30908 return -EFAULT;
30909@@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg,
30910 gdth_ha_str *ha;
30911 int rval;
30912
30913+ pax_track_stack();
30914+
30915 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
30916 return -EFAULT;
30917 ha = gdth_find_ha(gen.ionode);
30918@@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
30919 int i;
30920 gdth_cmd_str gdtcmd;
30921 char cmnd[MAX_COMMAND_SIZE];
30922+
30923+ pax_track_stack();
30924+
30925 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
30926
30927 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
30928diff -urNp linux-3.0.4/drivers/scsi/gdth_proc.c linux-3.0.4/drivers/scsi/gdth_proc.c
30929--- linux-3.0.4/drivers/scsi/gdth_proc.c 2011-07-21 22:17:23.000000000 -0400
30930+++ linux-3.0.4/drivers/scsi/gdth_proc.c 2011-08-23 21:48:14.000000000 -0400
30931@@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi
30932 u64 paddr;
30933
30934 char cmnd[MAX_COMMAND_SIZE];
30935+
30936+ pax_track_stack();
30937+
30938 memset(cmnd, 0xff, 12);
30939 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
30940
30941@@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,ch
30942 gdth_hget_str *phg;
30943 char cmnd[MAX_COMMAND_SIZE];
30944
30945+ pax_track_stack();
30946+
30947 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
30948 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
30949 if (!gdtcmd || !estr)
30950diff -urNp linux-3.0.4/drivers/scsi/hosts.c linux-3.0.4/drivers/scsi/hosts.c
30951--- linux-3.0.4/drivers/scsi/hosts.c 2011-07-21 22:17:23.000000000 -0400
30952+++ linux-3.0.4/drivers/scsi/hosts.c 2011-08-23 21:47:55.000000000 -0400
30953@@ -42,7 +42,7 @@
30954 #include "scsi_logging.h"
30955
30956
30957-static atomic_t scsi_host_next_hn; /* host_no for next new host */
30958+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
30959
30960
30961 static void scsi_host_cls_release(struct device *dev)
30962@@ -354,7 +354,7 @@ struct Scsi_Host *scsi_host_alloc(struct
30963 * subtract one because we increment first then return, but we need to
30964 * know what the next host number was before increment
30965 */
30966- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
30967+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
30968 shost->dma_channel = 0xff;
30969
30970 /* These three are default values which can be overridden */
30971diff -urNp linux-3.0.4/drivers/scsi/hpsa.c linux-3.0.4/drivers/scsi/hpsa.c
30972--- linux-3.0.4/drivers/scsi/hpsa.c 2011-07-21 22:17:23.000000000 -0400
30973+++ linux-3.0.4/drivers/scsi/hpsa.c 2011-08-23 21:47:55.000000000 -0400
30974@@ -498,7 +498,7 @@ static inline u32 next_command(struct ct
30975 u32 a;
30976
30977 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
30978- return h->access.command_completed(h);
30979+ return h->access->command_completed(h);
30980
30981 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
30982 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
30983@@ -2938,7 +2938,7 @@ static void start_io(struct ctlr_info *h
30984 while (!list_empty(&h->reqQ)) {
30985 c = list_entry(h->reqQ.next, struct CommandList, list);
30986 /* can't do anything if fifo is full */
30987- if ((h->access.fifo_full(h))) {
30988+ if ((h->access->fifo_full(h))) {
30989 dev_warn(&h->pdev->dev, "fifo full\n");
30990 break;
30991 }
30992@@ -2948,7 +2948,7 @@ static void start_io(struct ctlr_info *h
30993 h->Qdepth--;
30994
30995 /* Tell the controller execute command */
30996- h->access.submit_command(h, c);
30997+ h->access->submit_command(h, c);
30998
30999 /* Put job onto the completed Q */
31000 addQ(&h->cmpQ, c);
31001@@ -2957,17 +2957,17 @@ static void start_io(struct ctlr_info *h
31002
31003 static inline unsigned long get_next_completion(struct ctlr_info *h)
31004 {
31005- return h->access.command_completed(h);
31006+ return h->access->command_completed(h);
31007 }
31008
31009 static inline bool interrupt_pending(struct ctlr_info *h)
31010 {
31011- return h->access.intr_pending(h);
31012+ return h->access->intr_pending(h);
31013 }
31014
31015 static inline long interrupt_not_for_us(struct ctlr_info *h)
31016 {
31017- return (h->access.intr_pending(h) == 0) ||
31018+ return (h->access->intr_pending(h) == 0) ||
31019 (h->interrupts_enabled == 0);
31020 }
31021
31022@@ -3857,7 +3857,7 @@ static int __devinit hpsa_pci_init(struc
31023 if (prod_index < 0)
31024 return -ENODEV;
31025 h->product_name = products[prod_index].product_name;
31026- h->access = *(products[prod_index].access);
31027+ h->access = products[prod_index].access;
31028
31029 if (hpsa_board_disabled(h->pdev)) {
31030 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
31031@@ -4134,7 +4134,7 @@ reinit_after_soft_reset:
31032 }
31033
31034 /* make sure the board interrupts are off */
31035- h->access.set_intr_mask(h, HPSA_INTR_OFF);
31036+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
31037
31038 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
31039 goto clean2;
31040@@ -4168,7 +4168,7 @@ reinit_after_soft_reset:
31041 * fake ones to scoop up any residual completions.
31042 */
31043 spin_lock_irqsave(&h->lock, flags);
31044- h->access.set_intr_mask(h, HPSA_INTR_OFF);
31045+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
31046 spin_unlock_irqrestore(&h->lock, flags);
31047 free_irq(h->intr[h->intr_mode], h);
31048 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
31049@@ -4187,9 +4187,9 @@ reinit_after_soft_reset:
31050 dev_info(&h->pdev->dev, "Board READY.\n");
31051 dev_info(&h->pdev->dev,
31052 "Waiting for stale completions to drain.\n");
31053- h->access.set_intr_mask(h, HPSA_INTR_ON);
31054+ h->access->set_intr_mask(h, HPSA_INTR_ON);
31055 msleep(10000);
31056- h->access.set_intr_mask(h, HPSA_INTR_OFF);
31057+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
31058
31059 rc = controller_reset_failed(h->cfgtable);
31060 if (rc)
31061@@ -4210,7 +4210,7 @@ reinit_after_soft_reset:
31062 }
31063
31064 /* Turn the interrupts on so we can service requests */
31065- h->access.set_intr_mask(h, HPSA_INTR_ON);
31066+ h->access->set_intr_mask(h, HPSA_INTR_ON);
31067
31068 hpsa_hba_inquiry(h);
31069 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
31070@@ -4263,7 +4263,7 @@ static void hpsa_shutdown(struct pci_dev
31071 * To write all data in the battery backed cache to disks
31072 */
31073 hpsa_flush_cache(h);
31074- h->access.set_intr_mask(h, HPSA_INTR_OFF);
31075+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
31076 free_irq(h->intr[h->intr_mode], h);
31077 #ifdef CONFIG_PCI_MSI
31078 if (h->msix_vector)
31079@@ -4426,7 +4426,7 @@ static __devinit void hpsa_enter_perform
31080 return;
31081 }
31082 /* Change the access methods to the performant access methods */
31083- h->access = SA5_performant_access;
31084+ h->access = &SA5_performant_access;
31085 h->transMethod = CFGTBL_Trans_Performant;
31086 }
31087
31088diff -urNp linux-3.0.4/drivers/scsi/hpsa.h linux-3.0.4/drivers/scsi/hpsa.h
31089--- linux-3.0.4/drivers/scsi/hpsa.h 2011-09-02 18:11:21.000000000 -0400
31090+++ linux-3.0.4/drivers/scsi/hpsa.h 2011-08-23 21:47:55.000000000 -0400
31091@@ -73,7 +73,7 @@ struct ctlr_info {
31092 unsigned int msix_vector;
31093 unsigned int msi_vector;
31094 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
31095- struct access_method access;
31096+ struct access_method *access;
31097
31098 /* queue and queue Info */
31099 struct list_head reqQ;
31100diff -urNp linux-3.0.4/drivers/scsi/ips.h linux-3.0.4/drivers/scsi/ips.h
31101--- linux-3.0.4/drivers/scsi/ips.h 2011-07-21 22:17:23.000000000 -0400
31102+++ linux-3.0.4/drivers/scsi/ips.h 2011-08-23 21:47:55.000000000 -0400
31103@@ -1027,7 +1027,7 @@ typedef struct {
31104 int (*intr)(struct ips_ha *);
31105 void (*enableint)(struct ips_ha *);
31106 uint32_t (*statupd)(struct ips_ha *);
31107-} ips_hw_func_t;
31108+} __no_const ips_hw_func_t;
31109
31110 typedef struct ips_ha {
31111 uint8_t ha_id[IPS_MAX_CHANNELS+1];
31112diff -urNp linux-3.0.4/drivers/scsi/libfc/fc_exch.c linux-3.0.4/drivers/scsi/libfc/fc_exch.c
31113--- linux-3.0.4/drivers/scsi/libfc/fc_exch.c 2011-07-21 22:17:23.000000000 -0400
31114+++ linux-3.0.4/drivers/scsi/libfc/fc_exch.c 2011-08-23 21:47:55.000000000 -0400
31115@@ -105,12 +105,12 @@ struct fc_exch_mgr {
31116 * all together if not used XXX
31117 */
31118 struct {
31119- atomic_t no_free_exch;
31120- atomic_t no_free_exch_xid;
31121- atomic_t xid_not_found;
31122- atomic_t xid_busy;
31123- atomic_t seq_not_found;
31124- atomic_t non_bls_resp;
31125+ atomic_unchecked_t no_free_exch;
31126+ atomic_unchecked_t no_free_exch_xid;
31127+ atomic_unchecked_t xid_not_found;
31128+ atomic_unchecked_t xid_busy;
31129+ atomic_unchecked_t seq_not_found;
31130+ atomic_unchecked_t non_bls_resp;
31131 } stats;
31132 };
31133
31134@@ -700,7 +700,7 @@ static struct fc_exch *fc_exch_em_alloc(
31135 /* allocate memory for exchange */
31136 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
31137 if (!ep) {
31138- atomic_inc(&mp->stats.no_free_exch);
31139+ atomic_inc_unchecked(&mp->stats.no_free_exch);
31140 goto out;
31141 }
31142 memset(ep, 0, sizeof(*ep));
31143@@ -761,7 +761,7 @@ out:
31144 return ep;
31145 err:
31146 spin_unlock_bh(&pool->lock);
31147- atomic_inc(&mp->stats.no_free_exch_xid);
31148+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
31149 mempool_free(ep, mp->ep_pool);
31150 return NULL;
31151 }
31152@@ -906,7 +906,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31153 xid = ntohs(fh->fh_ox_id); /* we originated exch */
31154 ep = fc_exch_find(mp, xid);
31155 if (!ep) {
31156- atomic_inc(&mp->stats.xid_not_found);
31157+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31158 reject = FC_RJT_OX_ID;
31159 goto out;
31160 }
31161@@ -936,7 +936,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31162 ep = fc_exch_find(mp, xid);
31163 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
31164 if (ep) {
31165- atomic_inc(&mp->stats.xid_busy);
31166+ atomic_inc_unchecked(&mp->stats.xid_busy);
31167 reject = FC_RJT_RX_ID;
31168 goto rel;
31169 }
31170@@ -947,7 +947,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31171 }
31172 xid = ep->xid; /* get our XID */
31173 } else if (!ep) {
31174- atomic_inc(&mp->stats.xid_not_found);
31175+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31176 reject = FC_RJT_RX_ID; /* XID not found */
31177 goto out;
31178 }
31179@@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31180 } else {
31181 sp = &ep->seq;
31182 if (sp->id != fh->fh_seq_id) {
31183- atomic_inc(&mp->stats.seq_not_found);
31184+ atomic_inc_unchecked(&mp->stats.seq_not_found);
31185 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
31186 goto rel;
31187 }
31188@@ -1392,22 +1392,22 @@ static void fc_exch_recv_seq_resp(struct
31189
31190 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
31191 if (!ep) {
31192- atomic_inc(&mp->stats.xid_not_found);
31193+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31194 goto out;
31195 }
31196 if (ep->esb_stat & ESB_ST_COMPLETE) {
31197- atomic_inc(&mp->stats.xid_not_found);
31198+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31199 goto rel;
31200 }
31201 if (ep->rxid == FC_XID_UNKNOWN)
31202 ep->rxid = ntohs(fh->fh_rx_id);
31203 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
31204- atomic_inc(&mp->stats.xid_not_found);
31205+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31206 goto rel;
31207 }
31208 if (ep->did != ntoh24(fh->fh_s_id) &&
31209 ep->did != FC_FID_FLOGI) {
31210- atomic_inc(&mp->stats.xid_not_found);
31211+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31212 goto rel;
31213 }
31214 sof = fr_sof(fp);
31215@@ -1416,7 +1416,7 @@ static void fc_exch_recv_seq_resp(struct
31216 sp->ssb_stat |= SSB_ST_RESP;
31217 sp->id = fh->fh_seq_id;
31218 } else if (sp->id != fh->fh_seq_id) {
31219- atomic_inc(&mp->stats.seq_not_found);
31220+ atomic_inc_unchecked(&mp->stats.seq_not_found);
31221 goto rel;
31222 }
31223
31224@@ -1480,9 +1480,9 @@ static void fc_exch_recv_resp(struct fc_
31225 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
31226
31227 if (!sp)
31228- atomic_inc(&mp->stats.xid_not_found);
31229+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31230 else
31231- atomic_inc(&mp->stats.non_bls_resp);
31232+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
31233
31234 fc_frame_free(fp);
31235 }
31236diff -urNp linux-3.0.4/drivers/scsi/libsas/sas_ata.c linux-3.0.4/drivers/scsi/libsas/sas_ata.c
31237--- linux-3.0.4/drivers/scsi/libsas/sas_ata.c 2011-07-21 22:17:23.000000000 -0400
31238+++ linux-3.0.4/drivers/scsi/libsas/sas_ata.c 2011-08-23 21:47:55.000000000 -0400
31239@@ -368,7 +368,7 @@ static struct ata_port_operations sas_sa
31240 .postreset = ata_std_postreset,
31241 .error_handler = ata_std_error_handler,
31242 .post_internal_cmd = sas_ata_post_internal,
31243- .qc_defer = ata_std_qc_defer,
31244+ .qc_defer = ata_std_qc_defer,
31245 .qc_prep = ata_noop_qc_prep,
31246 .qc_issue = sas_ata_qc_issue,
31247 .qc_fill_rtf = sas_ata_qc_fill_rtf,
31248diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c
31249--- linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c 2011-07-21 22:17:23.000000000 -0400
31250+++ linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c 2011-08-23 21:48:14.000000000 -0400
31251@@ -104,7 +104,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_
31252
31253 #include <linux/debugfs.h>
31254
31255-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31256+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31257 static unsigned long lpfc_debugfs_start_time = 0L;
31258
31259 /* iDiag */
31260@@ -141,7 +141,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
31261 lpfc_debugfs_enable = 0;
31262
31263 len = 0;
31264- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
31265+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
31266 (lpfc_debugfs_max_disc_trc - 1);
31267 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
31268 dtp = vport->disc_trc + i;
31269@@ -202,7 +202,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
31270 lpfc_debugfs_enable = 0;
31271
31272 len = 0;
31273- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
31274+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
31275 (lpfc_debugfs_max_slow_ring_trc - 1);
31276 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
31277 dtp = phba->slow_ring_trc + i;
31278@@ -380,6 +380,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
31279 uint32_t *ptr;
31280 char buffer[1024];
31281
31282+ pax_track_stack();
31283+
31284 off = 0;
31285 spin_lock_irq(&phba->hbalock);
31286
31287@@ -617,14 +619,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
31288 !vport || !vport->disc_trc)
31289 return;
31290
31291- index = atomic_inc_return(&vport->disc_trc_cnt) &
31292+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
31293 (lpfc_debugfs_max_disc_trc - 1);
31294 dtp = vport->disc_trc + index;
31295 dtp->fmt = fmt;
31296 dtp->data1 = data1;
31297 dtp->data2 = data2;
31298 dtp->data3 = data3;
31299- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31300+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31301 dtp->jif = jiffies;
31302 #endif
31303 return;
31304@@ -655,14 +657,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
31305 !phba || !phba->slow_ring_trc)
31306 return;
31307
31308- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
31309+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
31310 (lpfc_debugfs_max_slow_ring_trc - 1);
31311 dtp = phba->slow_ring_trc + index;
31312 dtp->fmt = fmt;
31313 dtp->data1 = data1;
31314 dtp->data2 = data2;
31315 dtp->data3 = data3;
31316- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31317+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31318 dtp->jif = jiffies;
31319 #endif
31320 return;
31321@@ -2606,7 +2608,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31322 "slow_ring buffer\n");
31323 goto debug_failed;
31324 }
31325- atomic_set(&phba->slow_ring_trc_cnt, 0);
31326+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
31327 memset(phba->slow_ring_trc, 0,
31328 (sizeof(struct lpfc_debugfs_trc) *
31329 lpfc_debugfs_max_slow_ring_trc));
31330@@ -2652,7 +2654,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31331 "buffer\n");
31332 goto debug_failed;
31333 }
31334- atomic_set(&vport->disc_trc_cnt, 0);
31335+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
31336
31337 snprintf(name, sizeof(name), "discovery_trace");
31338 vport->debug_disc_trc =
31339diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc.h linux-3.0.4/drivers/scsi/lpfc/lpfc.h
31340--- linux-3.0.4/drivers/scsi/lpfc/lpfc.h 2011-07-21 22:17:23.000000000 -0400
31341+++ linux-3.0.4/drivers/scsi/lpfc/lpfc.h 2011-08-23 21:47:55.000000000 -0400
31342@@ -420,7 +420,7 @@ struct lpfc_vport {
31343 struct dentry *debug_nodelist;
31344 struct dentry *vport_debugfs_root;
31345 struct lpfc_debugfs_trc *disc_trc;
31346- atomic_t disc_trc_cnt;
31347+ atomic_unchecked_t disc_trc_cnt;
31348 #endif
31349 uint8_t stat_data_enabled;
31350 uint8_t stat_data_blocked;
31351@@ -826,8 +826,8 @@ struct lpfc_hba {
31352 struct timer_list fabric_block_timer;
31353 unsigned long bit_flags;
31354 #define FABRIC_COMANDS_BLOCKED 0
31355- atomic_t num_rsrc_err;
31356- atomic_t num_cmd_success;
31357+ atomic_unchecked_t num_rsrc_err;
31358+ atomic_unchecked_t num_cmd_success;
31359 unsigned long last_rsrc_error_time;
31360 unsigned long last_ramp_down_time;
31361 unsigned long last_ramp_up_time;
31362@@ -841,7 +841,7 @@ struct lpfc_hba {
31363 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
31364 struct dentry *debug_slow_ring_trc;
31365 struct lpfc_debugfs_trc *slow_ring_trc;
31366- atomic_t slow_ring_trc_cnt;
31367+ atomic_unchecked_t slow_ring_trc_cnt;
31368 /* iDiag debugfs sub-directory */
31369 struct dentry *idiag_root;
31370 struct dentry *idiag_pci_cfg;
31371diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c
31372--- linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c 2011-07-21 22:17:23.000000000 -0400
31373+++ linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c 2011-08-23 21:47:56.000000000 -0400
31374@@ -9923,8 +9923,10 @@ lpfc_init(void)
31375 printk(LPFC_COPYRIGHT "\n");
31376
31377 if (lpfc_enable_npiv) {
31378- lpfc_transport_functions.vport_create = lpfc_vport_create;
31379- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31380+ pax_open_kernel();
31381+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
31382+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31383+ pax_close_kernel();
31384 }
31385 lpfc_transport_template =
31386 fc_attach_transport(&lpfc_transport_functions);
31387diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c
31388--- linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c 2011-07-21 22:17:23.000000000 -0400
31389+++ linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c 2011-08-23 21:47:56.000000000 -0400
31390@@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
31391 uint32_t evt_posted;
31392
31393 spin_lock_irqsave(&phba->hbalock, flags);
31394- atomic_inc(&phba->num_rsrc_err);
31395+ atomic_inc_unchecked(&phba->num_rsrc_err);
31396 phba->last_rsrc_error_time = jiffies;
31397
31398 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
31399@@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
31400 unsigned long flags;
31401 struct lpfc_hba *phba = vport->phba;
31402 uint32_t evt_posted;
31403- atomic_inc(&phba->num_cmd_success);
31404+ atomic_inc_unchecked(&phba->num_cmd_success);
31405
31406 if (vport->cfg_lun_queue_depth <= queue_depth)
31407 return;
31408@@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31409 unsigned long num_rsrc_err, num_cmd_success;
31410 int i;
31411
31412- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
31413- num_cmd_success = atomic_read(&phba->num_cmd_success);
31414+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
31415+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
31416
31417 vports = lpfc_create_vport_work_array(phba);
31418 if (vports != NULL)
31419@@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31420 }
31421 }
31422 lpfc_destroy_vport_work_array(phba, vports);
31423- atomic_set(&phba->num_rsrc_err, 0);
31424- atomic_set(&phba->num_cmd_success, 0);
31425+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
31426+ atomic_set_unchecked(&phba->num_cmd_success, 0);
31427 }
31428
31429 /**
31430@@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
31431 }
31432 }
31433 lpfc_destroy_vport_work_array(phba, vports);
31434- atomic_set(&phba->num_rsrc_err, 0);
31435- atomic_set(&phba->num_cmd_success, 0);
31436+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
31437+ atomic_set_unchecked(&phba->num_cmd_success, 0);
31438 }
31439
31440 /**
31441diff -urNp linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c
31442--- linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c 2011-07-21 22:17:23.000000000 -0400
31443+++ linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c 2011-08-23 21:48:14.000000000 -0400
31444@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
31445 int rval;
31446 int i;
31447
31448+ pax_track_stack();
31449+
31450 // Allocate memory for the base list of scb for management module.
31451 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
31452
31453diff -urNp linux-3.0.4/drivers/scsi/osd/osd_initiator.c linux-3.0.4/drivers/scsi/osd/osd_initiator.c
31454--- linux-3.0.4/drivers/scsi/osd/osd_initiator.c 2011-07-21 22:17:23.000000000 -0400
31455+++ linux-3.0.4/drivers/scsi/osd/osd_initiator.c 2011-08-23 21:48:14.000000000 -0400
31456@@ -97,6 +97,8 @@ static int _osd_get_print_system_info(st
31457 int nelem = ARRAY_SIZE(get_attrs), a = 0;
31458 int ret;
31459
31460+ pax_track_stack();
31461+
31462 or = osd_start_request(od, GFP_KERNEL);
31463 if (!or)
31464 return -ENOMEM;
31465diff -urNp linux-3.0.4/drivers/scsi/pmcraid.c linux-3.0.4/drivers/scsi/pmcraid.c
31466--- linux-3.0.4/drivers/scsi/pmcraid.c 2011-09-02 18:11:21.000000000 -0400
31467+++ linux-3.0.4/drivers/scsi/pmcraid.c 2011-08-23 21:47:56.000000000 -0400
31468@@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct sc
31469 res->scsi_dev = scsi_dev;
31470 scsi_dev->hostdata = res;
31471 res->change_detected = 0;
31472- atomic_set(&res->read_failures, 0);
31473- atomic_set(&res->write_failures, 0);
31474+ atomic_set_unchecked(&res->read_failures, 0);
31475+ atomic_set_unchecked(&res->write_failures, 0);
31476 rc = 0;
31477 }
31478 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
31479@@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct
31480
31481 /* If this was a SCSI read/write command keep count of errors */
31482 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
31483- atomic_inc(&res->read_failures);
31484+ atomic_inc_unchecked(&res->read_failures);
31485 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
31486- atomic_inc(&res->write_failures);
31487+ atomic_inc_unchecked(&res->write_failures);
31488
31489 if (!RES_IS_GSCSI(res->cfg_entry) &&
31490 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
31491@@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
31492 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
31493 * hrrq_id assigned here in queuecommand
31494 */
31495- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
31496+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
31497 pinstance->num_hrrq;
31498 cmd->cmd_done = pmcraid_io_done;
31499
31500@@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
31501 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
31502 * hrrq_id assigned here in queuecommand
31503 */
31504- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
31505+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
31506 pinstance->num_hrrq;
31507
31508 if (request_size) {
31509@@ -4498,7 +4498,7 @@ static void pmcraid_worker_function(stru
31510
31511 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
31512 /* add resources only after host is added into system */
31513- if (!atomic_read(&pinstance->expose_resources))
31514+ if (!atomic_read_unchecked(&pinstance->expose_resources))
31515 return;
31516
31517 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
31518@@ -5332,8 +5332,8 @@ static int __devinit pmcraid_init_instan
31519 init_waitqueue_head(&pinstance->reset_wait_q);
31520
31521 atomic_set(&pinstance->outstanding_cmds, 0);
31522- atomic_set(&pinstance->last_message_id, 0);
31523- atomic_set(&pinstance->expose_resources, 0);
31524+ atomic_set_unchecked(&pinstance->last_message_id, 0);
31525+ atomic_set_unchecked(&pinstance->expose_resources, 0);
31526
31527 INIT_LIST_HEAD(&pinstance->free_res_q);
31528 INIT_LIST_HEAD(&pinstance->used_res_q);
31529@@ -6048,7 +6048,7 @@ static int __devinit pmcraid_probe(
31530 /* Schedule worker thread to handle CCN and take care of adding and
31531 * removing devices to OS
31532 */
31533- atomic_set(&pinstance->expose_resources, 1);
31534+ atomic_set_unchecked(&pinstance->expose_resources, 1);
31535 schedule_work(&pinstance->worker_q);
31536 return rc;
31537
31538diff -urNp linux-3.0.4/drivers/scsi/pmcraid.h linux-3.0.4/drivers/scsi/pmcraid.h
31539--- linux-3.0.4/drivers/scsi/pmcraid.h 2011-07-21 22:17:23.000000000 -0400
31540+++ linux-3.0.4/drivers/scsi/pmcraid.h 2011-08-23 21:47:56.000000000 -0400
31541@@ -749,7 +749,7 @@ struct pmcraid_instance {
31542 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
31543
31544 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
31545- atomic_t last_message_id;
31546+ atomic_unchecked_t last_message_id;
31547
31548 /* configuration table */
31549 struct pmcraid_config_table *cfg_table;
31550@@ -778,7 +778,7 @@ struct pmcraid_instance {
31551 atomic_t outstanding_cmds;
31552
31553 /* should add/delete resources to mid-layer now ?*/
31554- atomic_t expose_resources;
31555+ atomic_unchecked_t expose_resources;
31556
31557
31558
31559@@ -814,8 +814,8 @@ struct pmcraid_resource_entry {
31560 struct pmcraid_config_table_entry_ext cfg_entry_ext;
31561 };
31562 struct scsi_device *scsi_dev; /* Link scsi_device structure */
31563- atomic_t read_failures; /* count of failed READ commands */
31564- atomic_t write_failures; /* count of failed WRITE commands */
31565+ atomic_unchecked_t read_failures; /* count of failed READ commands */
31566+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
31567
31568 /* To indicate add/delete/modify during CCN */
31569 u8 change_detected;
31570diff -urNp linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h
31571--- linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h 2011-07-21 22:17:23.000000000 -0400
31572+++ linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h 2011-08-23 21:47:56.000000000 -0400
31573@@ -2244,7 +2244,7 @@ struct isp_operations {
31574 int (*get_flash_version) (struct scsi_qla_host *, void *);
31575 int (*start_scsi) (srb_t *);
31576 int (*abort_isp) (struct scsi_qla_host *);
31577-};
31578+} __no_const;
31579
31580 /* MSI-X Support *************************************************************/
31581
31582diff -urNp linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h
31583--- linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h 2011-07-21 22:17:23.000000000 -0400
31584+++ linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h 2011-08-23 21:47:56.000000000 -0400
31585@@ -256,7 +256,7 @@ struct ddb_entry {
31586 atomic_t retry_relogin_timer; /* Min Time between relogins
31587 * (4000 only) */
31588 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
31589- atomic_t relogin_retry_count; /* Num of times relogin has been
31590+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
31591 * retried */
31592
31593 uint16_t port;
31594diff -urNp linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c
31595--- linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c 2011-07-21 22:17:23.000000000 -0400
31596+++ linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c 2011-08-23 21:47:56.000000000 -0400
31597@@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_
31598 ddb_entry->fw_ddb_index = fw_ddb_index;
31599 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
31600 atomic_set(&ddb_entry->relogin_timer, 0);
31601- atomic_set(&ddb_entry->relogin_retry_count, 0);
31602+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
31603 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
31604 list_add_tail(&ddb_entry->list, &ha->ddb_list);
31605 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
31606@@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct s
31607 if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
31608 (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
31609 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
31610- atomic_set(&ddb_entry->relogin_retry_count, 0);
31611+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
31612 atomic_set(&ddb_entry->relogin_timer, 0);
31613 clear_bit(DF_RELOGIN, &ddb_entry->flags);
31614 iscsi_unblock_session(ddb_entry->sess);
31615diff -urNp linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c
31616--- linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c 2011-07-21 22:17:23.000000000 -0400
31617+++ linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c 2011-08-23 21:47:56.000000000 -0400
31618@@ -811,13 +811,13 @@ static void qla4xxx_timer(struct scsi_ql
31619 ddb_entry->fw_ddb_device_state ==
31620 DDB_DS_SESSION_FAILED) {
31621 /* Reset retry relogin timer */
31622- atomic_inc(&ddb_entry->relogin_retry_count);
31623+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
31624 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
31625 " timed out-retrying"
31626 " relogin (%d)\n",
31627 ha->host_no,
31628 ddb_entry->fw_ddb_index,
31629- atomic_read(&ddb_entry->
31630+ atomic_read_unchecked(&ddb_entry->
31631 relogin_retry_count))
31632 );
31633 start_dpc++;
31634diff -urNp linux-3.0.4/drivers/scsi/scsi.c linux-3.0.4/drivers/scsi/scsi.c
31635--- linux-3.0.4/drivers/scsi/scsi.c 2011-07-21 22:17:23.000000000 -0400
31636+++ linux-3.0.4/drivers/scsi/scsi.c 2011-08-23 21:47:56.000000000 -0400
31637@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
31638 unsigned long timeout;
31639 int rtn = 0;
31640
31641- atomic_inc(&cmd->device->iorequest_cnt);
31642+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
31643
31644 /* check if the device is still usable */
31645 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
31646diff -urNp linux-3.0.4/drivers/scsi/scsi_debug.c linux-3.0.4/drivers/scsi/scsi_debug.c
31647--- linux-3.0.4/drivers/scsi/scsi_debug.c 2011-07-21 22:17:23.000000000 -0400
31648+++ linux-3.0.4/drivers/scsi/scsi_debug.c 2011-08-23 21:48:14.000000000 -0400
31649@@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_
31650 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
31651 unsigned char *cmd = (unsigned char *)scp->cmnd;
31652
31653+ pax_track_stack();
31654+
31655 if ((errsts = check_readiness(scp, 1, devip)))
31656 return errsts;
31657 memset(arr, 0, sizeof(arr));
31658@@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cm
31659 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
31660 unsigned char *cmd = (unsigned char *)scp->cmnd;
31661
31662+ pax_track_stack();
31663+
31664 if ((errsts = check_readiness(scp, 1, devip)))
31665 return errsts;
31666 memset(arr, 0, sizeof(arr));
31667diff -urNp linux-3.0.4/drivers/scsi/scsi_lib.c linux-3.0.4/drivers/scsi/scsi_lib.c
31668--- linux-3.0.4/drivers/scsi/scsi_lib.c 2011-09-02 18:11:21.000000000 -0400
31669+++ linux-3.0.4/drivers/scsi/scsi_lib.c 2011-08-23 21:47:56.000000000 -0400
31670@@ -1412,7 +1412,7 @@ static void scsi_kill_request(struct req
31671 shost = sdev->host;
31672 scsi_init_cmd_errh(cmd);
31673 cmd->result = DID_NO_CONNECT << 16;
31674- atomic_inc(&cmd->device->iorequest_cnt);
31675+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
31676
31677 /*
31678 * SCSI request completion path will do scsi_device_unbusy(),
31679@@ -1438,9 +1438,9 @@ static void scsi_softirq_done(struct req
31680
31681 INIT_LIST_HEAD(&cmd->eh_entry);
31682
31683- atomic_inc(&cmd->device->iodone_cnt);
31684+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
31685 if (cmd->result)
31686- atomic_inc(&cmd->device->ioerr_cnt);
31687+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
31688
31689 disposition = scsi_decide_disposition(cmd);
31690 if (disposition != SUCCESS &&
31691diff -urNp linux-3.0.4/drivers/scsi/scsi_sysfs.c linux-3.0.4/drivers/scsi/scsi_sysfs.c
31692--- linux-3.0.4/drivers/scsi/scsi_sysfs.c 2011-07-21 22:17:23.000000000 -0400
31693+++ linux-3.0.4/drivers/scsi/scsi_sysfs.c 2011-08-23 21:47:56.000000000 -0400
31694@@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev,
31695 char *buf) \
31696 { \
31697 struct scsi_device *sdev = to_scsi_device(dev); \
31698- unsigned long long count = atomic_read(&sdev->field); \
31699+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
31700 return snprintf(buf, 20, "0x%llx\n", count); \
31701 } \
31702 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
31703diff -urNp linux-3.0.4/drivers/scsi/scsi_transport_fc.c linux-3.0.4/drivers/scsi/scsi_transport_fc.c
31704--- linux-3.0.4/drivers/scsi/scsi_transport_fc.c 2011-07-21 22:17:23.000000000 -0400
31705+++ linux-3.0.4/drivers/scsi/scsi_transport_fc.c 2011-08-23 21:47:56.000000000 -0400
31706@@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_
31707 * Netlink Infrastructure
31708 */
31709
31710-static atomic_t fc_event_seq;
31711+static atomic_unchecked_t fc_event_seq;
31712
31713 /**
31714 * fc_get_event_number - Obtain the next sequential FC event number
31715@@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
31716 u32
31717 fc_get_event_number(void)
31718 {
31719- return atomic_add_return(1, &fc_event_seq);
31720+ return atomic_add_return_unchecked(1, &fc_event_seq);
31721 }
31722 EXPORT_SYMBOL(fc_get_event_number);
31723
31724@@ -645,7 +645,7 @@ static __init int fc_transport_init(void
31725 {
31726 int error;
31727
31728- atomic_set(&fc_event_seq, 0);
31729+ atomic_set_unchecked(&fc_event_seq, 0);
31730
31731 error = transport_class_register(&fc_host_class);
31732 if (error)
31733@@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char
31734 char *cp;
31735
31736 *val = simple_strtoul(buf, &cp, 0);
31737- if ((*cp && (*cp != '\n')) || (*val < 0))
31738+ if (*cp && (*cp != '\n'))
31739 return -EINVAL;
31740 /*
31741 * Check for overflow; dev_loss_tmo is u32
31742diff -urNp linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c
31743--- linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c 2011-07-21 22:17:23.000000000 -0400
31744+++ linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c 2011-08-23 21:47:56.000000000 -0400
31745@@ -83,7 +83,7 @@ struct iscsi_internal {
31746 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
31747 };
31748
31749-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
31750+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
31751 static struct workqueue_struct *iscsi_eh_timer_workq;
31752
31753 /*
31754@@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_s
31755 int err;
31756
31757 ihost = shost->shost_data;
31758- session->sid = atomic_add_return(1, &iscsi_session_nr);
31759+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
31760
31761 if (id == ISCSI_MAX_TARGET) {
31762 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
31763@@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(v
31764 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
31765 ISCSI_TRANSPORT_VERSION);
31766
31767- atomic_set(&iscsi_session_nr, 0);
31768+ atomic_set_unchecked(&iscsi_session_nr, 0);
31769
31770 err = class_register(&iscsi_transport_class);
31771 if (err)
31772diff -urNp linux-3.0.4/drivers/scsi/scsi_transport_srp.c linux-3.0.4/drivers/scsi/scsi_transport_srp.c
31773--- linux-3.0.4/drivers/scsi/scsi_transport_srp.c 2011-07-21 22:17:23.000000000 -0400
31774+++ linux-3.0.4/drivers/scsi/scsi_transport_srp.c 2011-08-23 21:47:56.000000000 -0400
31775@@ -33,7 +33,7 @@
31776 #include "scsi_transport_srp_internal.h"
31777
31778 struct srp_host_attrs {
31779- atomic_t next_port_id;
31780+ atomic_unchecked_t next_port_id;
31781 };
31782 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
31783
31784@@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
31785 struct Scsi_Host *shost = dev_to_shost(dev);
31786 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
31787
31788- atomic_set(&srp_host->next_port_id, 0);
31789+ atomic_set_unchecked(&srp_host->next_port_id, 0);
31790 return 0;
31791 }
31792
31793@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
31794 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
31795 rport->roles = ids->roles;
31796
31797- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
31798+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
31799 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
31800
31801 transport_setup_device(&rport->dev);
31802diff -urNp linux-3.0.4/drivers/scsi/sg.c linux-3.0.4/drivers/scsi/sg.c
31803--- linux-3.0.4/drivers/scsi/sg.c 2011-07-21 22:17:23.000000000 -0400
31804+++ linux-3.0.4/drivers/scsi/sg.c 2011-08-23 21:47:56.000000000 -0400
31805@@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
31806 const struct file_operations * fops;
31807 };
31808
31809-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
31810+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
31811 {"allow_dio", &adio_fops},
31812 {"debug", &debug_fops},
31813 {"def_reserved_size", &dressz_fops},
31814@@ -2325,7 +2325,7 @@ sg_proc_init(void)
31815 {
31816 int k, mask;
31817 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
31818- struct sg_proc_leaf * leaf;
31819+ const struct sg_proc_leaf * leaf;
31820
31821 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
31822 if (!sg_proc_sgp)
31823diff -urNp linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c
31824--- linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-07-21 22:17:23.000000000 -0400
31825+++ linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-08-23 21:48:14.000000000 -0400
31826@@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct p
31827 int do_iounmap = 0;
31828 int do_disable_device = 1;
31829
31830+ pax_track_stack();
31831+
31832 memset(&sym_dev, 0, sizeof(sym_dev));
31833 memset(&nvram, 0, sizeof(nvram));
31834 sym_dev.pdev = pdev;
31835diff -urNp linux-3.0.4/drivers/scsi/vmw_pvscsi.c linux-3.0.4/drivers/scsi/vmw_pvscsi.c
31836--- linux-3.0.4/drivers/scsi/vmw_pvscsi.c 2011-07-21 22:17:23.000000000 -0400
31837+++ linux-3.0.4/drivers/scsi/vmw_pvscsi.c 2011-08-23 21:48:14.000000000 -0400
31838@@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const
31839 dma_addr_t base;
31840 unsigned i;
31841
31842+ pax_track_stack();
31843+
31844 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
31845 cmd.reqRingNumPages = adapter->req_pages;
31846 cmd.cmpRingNumPages = adapter->cmp_pages;
31847diff -urNp linux-3.0.4/drivers/spi/spi.c linux-3.0.4/drivers/spi/spi.c
31848--- linux-3.0.4/drivers/spi/spi.c 2011-07-21 22:17:23.000000000 -0400
31849+++ linux-3.0.4/drivers/spi/spi.c 2011-08-23 21:47:56.000000000 -0400
31850@@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *ma
31851 EXPORT_SYMBOL_GPL(spi_bus_unlock);
31852
31853 /* portable code must never pass more than 32 bytes */
31854-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
31855+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
31856
31857 static u8 *buf;
31858
31859diff -urNp linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c
31860--- linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-09-02 18:11:21.000000000 -0400
31861+++ linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-08-23 21:48:14.000000000 -0400
31862@@ -362,7 +362,7 @@ static struct ar_cookie s_ar_cookie_mem[
31863 (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
31864
31865
31866-static struct net_device_ops ar6000_netdev_ops = {
31867+static net_device_ops_no_const ar6000_netdev_ops = {
31868 .ndo_init = NULL,
31869 .ndo_open = ar6000_open,
31870 .ndo_stop = ar6000_close,
31871diff -urNp linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
31872--- linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-07-21 22:17:23.000000000 -0400
31873+++ linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-08-23 21:47:56.000000000 -0400
31874@@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void
31875 typedef struct ar6k_pal_config_s
31876 {
31877 ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
31878-}ar6k_pal_config_t;
31879+} __no_const ar6k_pal_config_t;
31880
31881 void register_pal_cb(ar6k_pal_config_t *palConfig_p);
31882 #endif /* _AR6K_PAL_H_ */
31883diff -urNp linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
31884--- linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-07-21 22:17:23.000000000 -0400
31885+++ linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-08-23 21:47:56.000000000 -0400
31886@@ -853,14 +853,14 @@ static void dhd_op_if(dhd_if_t *ifp)
31887 free_netdev(ifp->net);
31888 }
31889 /* Allocate etherdev, including space for private structure */
31890- ifp->net = alloc_etherdev(sizeof(dhd));
31891+ ifp->net = alloc_etherdev(sizeof(*dhd));
31892 if (!ifp->net) {
31893 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
31894 ret = -ENOMEM;
31895 }
31896 if (ret == 0) {
31897 strcpy(ifp->net->name, ifp->name);
31898- memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
31899+ memcpy(netdev_priv(ifp->net), dhd, sizeof(*dhd));
31900 err = dhd_net_attach(&dhd->pub, ifp->idx);
31901 if (err != 0) {
31902 DHD_ERROR(("%s: dhd_net_attach failed, "
31903@@ -1872,7 +1872,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
31904 strcpy(nv_path, nvram_path);
31905
31906 /* Allocate etherdev, including space for private structure */
31907- net = alloc_etherdev(sizeof(dhd));
31908+ net = alloc_etherdev(sizeof(*dhd));
31909 if (!net) {
31910 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
31911 goto fail;
31912@@ -1888,7 +1888,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
31913 /*
31914 * Save the dhd_info into the priv
31915 */
31916- memcpy(netdev_priv(net), &dhd, sizeof(dhd));
31917+ memcpy(netdev_priv(net), dhd, sizeof(*dhd));
31918
31919 /* Set network interface name if it was provided as module parameter */
31920 if (iface_name[0]) {
31921@@ -2004,7 +2004,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
31922 /*
31923 * Save the dhd_info into the priv
31924 */
31925- memcpy(netdev_priv(net), &dhd, sizeof(dhd));
31926+ memcpy(netdev_priv(net), dhd, sizeof(*dhd));
31927
31928 #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
31929 g_bus = bus;
31930diff -urNp linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h
31931--- linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-07-21 22:17:23.000000000 -0400
31932+++ linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-08-23 21:47:56.000000000 -0400
31933@@ -593,7 +593,7 @@ struct phy_func_ptr {
31934 initfn_t carrsuppr;
31935 rxsigpwrfn_t rxsigpwr;
31936 detachfn_t detach;
31937-};
31938+} __no_const;
31939 typedef struct phy_func_ptr phy_func_ptr_t;
31940
31941 struct phy_info {
31942diff -urNp linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h
31943--- linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h 2011-07-21 22:17:23.000000000 -0400
31944+++ linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h 2011-08-23 21:47:56.000000000 -0400
31945@@ -185,7 +185,7 @@ typedef struct {
31946 u16 func, uint bustype, void *regsva, void *param);
31947 /* detach from device */
31948 void (*detach) (void *ch);
31949-} bcmsdh_driver_t;
31950+} __no_const bcmsdh_driver_t;
31951
31952 /* platform specific/high level functions */
31953 extern int bcmsdh_register(bcmsdh_driver_t *driver);
31954diff -urNp linux-3.0.4/drivers/staging/et131x/et1310_tx.c linux-3.0.4/drivers/staging/et131x/et1310_tx.c
31955--- linux-3.0.4/drivers/staging/et131x/et1310_tx.c 2011-07-21 22:17:23.000000000 -0400
31956+++ linux-3.0.4/drivers/staging/et131x/et1310_tx.c 2011-08-23 21:47:56.000000000 -0400
31957@@ -635,11 +635,11 @@ inline void et131x_free_send_packet(stru
31958 struct net_device_stats *stats = &etdev->net_stats;
31959
31960 if (tcb->flags & fMP_DEST_BROAD)
31961- atomic_inc(&etdev->Stats.brdcstxmt);
31962+ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
31963 else if (tcb->flags & fMP_DEST_MULTI)
31964- atomic_inc(&etdev->Stats.multixmt);
31965+ atomic_inc_unchecked(&etdev->Stats.multixmt);
31966 else
31967- atomic_inc(&etdev->Stats.unixmt);
31968+ atomic_inc_unchecked(&etdev->Stats.unixmt);
31969
31970 if (tcb->skb) {
31971 stats->tx_bytes += tcb->skb->len;
31972diff -urNp linux-3.0.4/drivers/staging/et131x/et131x_adapter.h linux-3.0.4/drivers/staging/et131x/et131x_adapter.h
31973--- linux-3.0.4/drivers/staging/et131x/et131x_adapter.h 2011-07-21 22:17:23.000000000 -0400
31974+++ linux-3.0.4/drivers/staging/et131x/et131x_adapter.h 2011-08-23 21:47:56.000000000 -0400
31975@@ -110,11 +110,11 @@ typedef struct _ce_stats_t {
31976 * operations
31977 */
31978 u32 unircv; /* # multicast packets received */
31979- atomic_t unixmt; /* # multicast packets for Tx */
31980+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
31981 u32 multircv; /* # multicast packets received */
31982- atomic_t multixmt; /* # multicast packets for Tx */
31983+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
31984 u32 brdcstrcv; /* # broadcast packets received */
31985- atomic_t brdcstxmt; /* # broadcast packets for Tx */
31986+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
31987 u32 norcvbuf; /* # Rx packets discarded */
31988 u32 noxmtbuf; /* # Tx packets discarded */
31989
31990diff -urNp linux-3.0.4/drivers/staging/hv/channel.c linux-3.0.4/drivers/staging/hv/channel.c
31991--- linux-3.0.4/drivers/staging/hv/channel.c 2011-09-02 18:11:21.000000000 -0400
31992+++ linux-3.0.4/drivers/staging/hv/channel.c 2011-08-23 21:47:56.000000000 -0400
31993@@ -433,8 +433,8 @@ int vmbus_establish_gpadl(struct vmbus_c
31994 int ret = 0;
31995 int t;
31996
31997- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
31998- atomic_inc(&vmbus_connection.next_gpadl_handle);
31999+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
32000+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
32001
32002 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
32003 if (ret)
32004diff -urNp linux-3.0.4/drivers/staging/hv/hv.c linux-3.0.4/drivers/staging/hv/hv.c
32005--- linux-3.0.4/drivers/staging/hv/hv.c 2011-07-21 22:17:23.000000000 -0400
32006+++ linux-3.0.4/drivers/staging/hv/hv.c 2011-08-23 21:47:56.000000000 -0400
32007@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, voi
32008 u64 output_address = (output) ? virt_to_phys(output) : 0;
32009 u32 output_address_hi = output_address >> 32;
32010 u32 output_address_lo = output_address & 0xFFFFFFFF;
32011- volatile void *hypercall_page = hv_context.hypercall_page;
32012+ volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
32013
32014 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
32015 "=a"(hv_status_lo) : "d" (control_hi),
32016diff -urNp linux-3.0.4/drivers/staging/hv/hv_mouse.c linux-3.0.4/drivers/staging/hv/hv_mouse.c
32017--- linux-3.0.4/drivers/staging/hv/hv_mouse.c 2011-07-21 22:17:23.000000000 -0400
32018+++ linux-3.0.4/drivers/staging/hv/hv_mouse.c 2011-08-23 21:47:56.000000000 -0400
32019@@ -879,8 +879,10 @@ static void reportdesc_callback(struct h
32020 if (hid_dev) {
32021 DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
32022
32023- hid_dev->ll_driver->open = mousevsc_hid_open;
32024- hid_dev->ll_driver->close = mousevsc_hid_close;
32025+ pax_open_kernel();
32026+ *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open;
32027+ *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
32028+ pax_close_kernel();
32029
32030 hid_dev->bus = BUS_VIRTUAL;
32031 hid_dev->vendor = input_device_ctx->device_info.vendor;
32032diff -urNp linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h
32033--- linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h 2011-07-21 22:17:23.000000000 -0400
32034+++ linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h 2011-08-23 21:47:56.000000000 -0400
32035@@ -559,7 +559,7 @@ enum vmbus_connect_state {
32036 struct vmbus_connection {
32037 enum vmbus_connect_state conn_state;
32038
32039- atomic_t next_gpadl_handle;
32040+ atomic_unchecked_t next_gpadl_handle;
32041
32042 /*
32043 * Represents channel interrupts. Each bit position represents a
32044diff -urNp linux-3.0.4/drivers/staging/hv/rndis_filter.c linux-3.0.4/drivers/staging/hv/rndis_filter.c
32045--- linux-3.0.4/drivers/staging/hv/rndis_filter.c 2011-09-02 18:11:21.000000000 -0400
32046+++ linux-3.0.4/drivers/staging/hv/rndis_filter.c 2011-08-23 21:47:56.000000000 -0400
32047@@ -43,7 +43,7 @@ struct rndis_device {
32048
32049 enum rndis_device_state state;
32050 u32 link_stat;
32051- atomic_t new_req_id;
32052+ atomic_unchecked_t new_req_id;
32053
32054 spinlock_t request_lock;
32055 struct list_head req_list;
32056@@ -117,7 +117,7 @@ static struct rndis_request *get_rndis_r
32057 * template
32058 */
32059 set = &rndis_msg->msg.set_req;
32060- set->req_id = atomic_inc_return(&dev->new_req_id);
32061+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32062
32063 /* Add to the request list */
32064 spin_lock_irqsave(&dev->request_lock, flags);
32065@@ -637,7 +637,7 @@ static void rndis_filter_halt_device(str
32066
32067 /* Setup the rndis set */
32068 halt = &request->request_msg.msg.halt_req;
32069- halt->req_id = atomic_inc_return(&dev->new_req_id);
32070+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32071
32072 /* Ignore return since this msg is optional. */
32073 rndis_filter_send_request(dev, request);
32074diff -urNp linux-3.0.4/drivers/staging/hv/vmbus_drv.c linux-3.0.4/drivers/staging/hv/vmbus_drv.c
32075--- linux-3.0.4/drivers/staging/hv/vmbus_drv.c 2011-07-21 22:17:23.000000000 -0400
32076+++ linux-3.0.4/drivers/staging/hv/vmbus_drv.c 2011-08-23 21:47:56.000000000 -0400
32077@@ -668,11 +668,11 @@ int vmbus_child_device_register(struct h
32078 {
32079 int ret = 0;
32080
32081- static atomic_t device_num = ATOMIC_INIT(0);
32082+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
32083
32084 /* Set the device name. Otherwise, device_register() will fail. */
32085 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
32086- atomic_inc_return(&device_num));
32087+ atomic_inc_return_unchecked(&device_num));
32088
32089 /* The new device belongs to this bus */
32090 child_device_obj->device.bus = &hv_bus; /* device->dev.bus; */
32091diff -urNp linux-3.0.4/drivers/staging/iio/ring_generic.h linux-3.0.4/drivers/staging/iio/ring_generic.h
32092--- linux-3.0.4/drivers/staging/iio/ring_generic.h 2011-07-21 22:17:23.000000000 -0400
32093+++ linux-3.0.4/drivers/staging/iio/ring_generic.h 2011-08-23 21:47:56.000000000 -0400
32094@@ -62,7 +62,7 @@ struct iio_ring_access_funcs {
32095
32096 int (*is_enabled)(struct iio_ring_buffer *ring);
32097 int (*enable)(struct iio_ring_buffer *ring);
32098-};
32099+} __no_const;
32100
32101 struct iio_ring_setup_ops {
32102 int (*preenable)(struct iio_dev *);
32103diff -urNp linux-3.0.4/drivers/staging/octeon/ethernet.c linux-3.0.4/drivers/staging/octeon/ethernet.c
32104--- linux-3.0.4/drivers/staging/octeon/ethernet.c 2011-07-21 22:17:23.000000000 -0400
32105+++ linux-3.0.4/drivers/staging/octeon/ethernet.c 2011-08-23 21:47:56.000000000 -0400
32106@@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_
32107 * since the RX tasklet also increments it.
32108 */
32109 #ifdef CONFIG_64BIT
32110- atomic64_add(rx_status.dropped_packets,
32111- (atomic64_t *)&priv->stats.rx_dropped);
32112+ atomic64_add_unchecked(rx_status.dropped_packets,
32113+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32114 #else
32115- atomic_add(rx_status.dropped_packets,
32116- (atomic_t *)&priv->stats.rx_dropped);
32117+ atomic_add_unchecked(rx_status.dropped_packets,
32118+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
32119 #endif
32120 }
32121
32122diff -urNp linux-3.0.4/drivers/staging/octeon/ethernet-rx.c linux-3.0.4/drivers/staging/octeon/ethernet-rx.c
32123--- linux-3.0.4/drivers/staging/octeon/ethernet-rx.c 2011-07-21 22:17:23.000000000 -0400
32124+++ linux-3.0.4/drivers/staging/octeon/ethernet-rx.c 2011-08-23 21:47:56.000000000 -0400
32125@@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi
32126 /* Increment RX stats for virtual ports */
32127 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
32128 #ifdef CONFIG_64BIT
32129- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
32130- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
32131+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
32132+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
32133 #else
32134- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
32135- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
32136+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
32137+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
32138 #endif
32139 }
32140 netif_receive_skb(skb);
32141@@ -433,9 +433,9 @@ static int cvm_oct_napi_poll(struct napi
32142 dev->name);
32143 */
32144 #ifdef CONFIG_64BIT
32145- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
32146+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32147 #else
32148- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
32149+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
32150 #endif
32151 dev_kfree_skb_irq(skb);
32152 }
32153diff -urNp linux-3.0.4/drivers/staging/pohmelfs/inode.c linux-3.0.4/drivers/staging/pohmelfs/inode.c
32154--- linux-3.0.4/drivers/staging/pohmelfs/inode.c 2011-07-21 22:17:23.000000000 -0400
32155+++ linux-3.0.4/drivers/staging/pohmelfs/inode.c 2011-08-23 21:47:56.000000000 -0400
32156@@ -1856,7 +1856,7 @@ static int pohmelfs_fill_super(struct su
32157 mutex_init(&psb->mcache_lock);
32158 psb->mcache_root = RB_ROOT;
32159 psb->mcache_timeout = msecs_to_jiffies(5000);
32160- atomic_long_set(&psb->mcache_gen, 0);
32161+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
32162
32163 psb->trans_max_pages = 100;
32164
32165@@ -1871,7 +1871,7 @@ static int pohmelfs_fill_super(struct su
32166 INIT_LIST_HEAD(&psb->crypto_ready_list);
32167 INIT_LIST_HEAD(&psb->crypto_active_list);
32168
32169- atomic_set(&psb->trans_gen, 1);
32170+ atomic_set_unchecked(&psb->trans_gen, 1);
32171 atomic_long_set(&psb->total_inodes, 0);
32172
32173 mutex_init(&psb->state_lock);
32174diff -urNp linux-3.0.4/drivers/staging/pohmelfs/mcache.c linux-3.0.4/drivers/staging/pohmelfs/mcache.c
32175--- linux-3.0.4/drivers/staging/pohmelfs/mcache.c 2011-07-21 22:17:23.000000000 -0400
32176+++ linux-3.0.4/drivers/staging/pohmelfs/mcache.c 2011-08-23 21:47:56.000000000 -0400
32177@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
32178 m->data = data;
32179 m->start = start;
32180 m->size = size;
32181- m->gen = atomic_long_inc_return(&psb->mcache_gen);
32182+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
32183
32184 mutex_lock(&psb->mcache_lock);
32185 err = pohmelfs_mcache_insert(psb, m);
32186diff -urNp linux-3.0.4/drivers/staging/pohmelfs/netfs.h linux-3.0.4/drivers/staging/pohmelfs/netfs.h
32187--- linux-3.0.4/drivers/staging/pohmelfs/netfs.h 2011-07-21 22:17:23.000000000 -0400
32188+++ linux-3.0.4/drivers/staging/pohmelfs/netfs.h 2011-08-23 21:47:56.000000000 -0400
32189@@ -571,14 +571,14 @@ struct pohmelfs_config;
32190 struct pohmelfs_sb {
32191 struct rb_root mcache_root;
32192 struct mutex mcache_lock;
32193- atomic_long_t mcache_gen;
32194+ atomic_long_unchecked_t mcache_gen;
32195 unsigned long mcache_timeout;
32196
32197 unsigned int idx;
32198
32199 unsigned int trans_retries;
32200
32201- atomic_t trans_gen;
32202+ atomic_unchecked_t trans_gen;
32203
32204 unsigned int crypto_attached_size;
32205 unsigned int crypto_align_size;
32206diff -urNp linux-3.0.4/drivers/staging/pohmelfs/trans.c linux-3.0.4/drivers/staging/pohmelfs/trans.c
32207--- linux-3.0.4/drivers/staging/pohmelfs/trans.c 2011-07-21 22:17:23.000000000 -0400
32208+++ linux-3.0.4/drivers/staging/pohmelfs/trans.c 2011-08-23 21:47:56.000000000 -0400
32209@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
32210 int err;
32211 struct netfs_cmd *cmd = t->iovec.iov_base;
32212
32213- t->gen = atomic_inc_return(&psb->trans_gen);
32214+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
32215
32216 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
32217 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
32218diff -urNp linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h
32219--- linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h 2011-07-21 22:17:23.000000000 -0400
32220+++ linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h 2011-08-23 21:47:56.000000000 -0400
32221@@ -83,7 +83,7 @@ struct _io_ops {
32222 u8 *pmem);
32223 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
32224 u8 *pmem);
32225-};
32226+} __no_const;
32227
32228 struct io_req {
32229 struct list_head list;
32230diff -urNp linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c
32231--- linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c 2011-07-21 22:17:23.000000000 -0400
32232+++ linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c 2011-08-24 18:21:41.000000000 -0400
32233@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, s
32234 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
32235
32236 if (rlen)
32237- if (copy_to_user(data, &resp, rlen))
32238+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
32239 return -EFAULT;
32240
32241 return 0;
32242diff -urNp linux-3.0.4/drivers/staging/tty/stallion.c linux-3.0.4/drivers/staging/tty/stallion.c
32243--- linux-3.0.4/drivers/staging/tty/stallion.c 2011-07-21 22:17:23.000000000 -0400
32244+++ linux-3.0.4/drivers/staging/tty/stallion.c 2011-08-23 21:48:14.000000000 -0400
32245@@ -2406,6 +2406,8 @@ static int stl_getportstruct(struct stlp
32246 struct stlport stl_dummyport;
32247 struct stlport *portp;
32248
32249+ pax_track_stack();
32250+
32251 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
32252 return -EFAULT;
32253 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
32254diff -urNp linux-3.0.4/drivers/staging/usbip/usbip_common.h linux-3.0.4/drivers/staging/usbip/usbip_common.h
32255--- linux-3.0.4/drivers/staging/usbip/usbip_common.h 2011-07-21 22:17:23.000000000 -0400
32256+++ linux-3.0.4/drivers/staging/usbip/usbip_common.h 2011-08-23 21:47:56.000000000 -0400
32257@@ -315,7 +315,7 @@ struct usbip_device {
32258 void (*shutdown)(struct usbip_device *);
32259 void (*reset)(struct usbip_device *);
32260 void (*unusable)(struct usbip_device *);
32261- } eh_ops;
32262+ } __no_const eh_ops;
32263 };
32264
32265 void usbip_pack_pdu(struct usbip_header *pdu, struct urb *urb, int cmd,
32266diff -urNp linux-3.0.4/drivers/staging/usbip/vhci.h linux-3.0.4/drivers/staging/usbip/vhci.h
32267--- linux-3.0.4/drivers/staging/usbip/vhci.h 2011-07-21 22:17:23.000000000 -0400
32268+++ linux-3.0.4/drivers/staging/usbip/vhci.h 2011-08-23 21:47:56.000000000 -0400
32269@@ -94,7 +94,7 @@ struct vhci_hcd {
32270 unsigned resuming:1;
32271 unsigned long re_timeout;
32272
32273- atomic_t seqnum;
32274+ atomic_unchecked_t seqnum;
32275
32276 /*
32277 * NOTE:
32278diff -urNp linux-3.0.4/drivers/staging/usbip/vhci_hcd.c linux-3.0.4/drivers/staging/usbip/vhci_hcd.c
32279--- linux-3.0.4/drivers/staging/usbip/vhci_hcd.c 2011-09-02 18:11:21.000000000 -0400
32280+++ linux-3.0.4/drivers/staging/usbip/vhci_hcd.c 2011-08-23 21:47:56.000000000 -0400
32281@@ -511,7 +511,7 @@ static void vhci_tx_urb(struct urb *urb)
32282 return;
32283 }
32284
32285- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
32286+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32287 if (priv->seqnum == 0xffff)
32288 dev_info(&urb->dev->dev, "seqnum max\n");
32289
32290@@ -765,7 +765,7 @@ static int vhci_urb_dequeue(struct usb_h
32291 return -ENOMEM;
32292 }
32293
32294- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
32295+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32296 if (unlink->seqnum == 0xffff)
32297 pr_info("seqnum max\n");
32298
32299@@ -955,7 +955,7 @@ static int vhci_start(struct usb_hcd *hc
32300 vdev->rhport = rhport;
32301 }
32302
32303- atomic_set(&vhci->seqnum, 0);
32304+ atomic_set_unchecked(&vhci->seqnum, 0);
32305 spin_lock_init(&vhci->lock);
32306
32307 hcd->power_budget = 0; /* no limit */
32308diff -urNp linux-3.0.4/drivers/staging/usbip/vhci_rx.c linux-3.0.4/drivers/staging/usbip/vhci_rx.c
32309--- linux-3.0.4/drivers/staging/usbip/vhci_rx.c 2011-07-21 22:17:23.000000000 -0400
32310+++ linux-3.0.4/drivers/staging/usbip/vhci_rx.c 2011-08-23 21:47:56.000000000 -0400
32311@@ -76,7 +76,7 @@ static void vhci_recv_ret_submit(struct
32312 if (!urb) {
32313 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
32314 pr_info("max seqnum %d\n",
32315- atomic_read(&the_controller->seqnum));
32316+ atomic_read_unchecked(&the_controller->seqnum));
32317 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
32318 return;
32319 }
32320diff -urNp linux-3.0.4/drivers/staging/vt6655/hostap.c linux-3.0.4/drivers/staging/vt6655/hostap.c
32321--- linux-3.0.4/drivers/staging/vt6655/hostap.c 2011-07-21 22:17:23.000000000 -0400
32322+++ linux-3.0.4/drivers/staging/vt6655/hostap.c 2011-08-23 21:47:56.000000000 -0400
32323@@ -79,14 +79,13 @@ static int msglevel
32324 *
32325 */
32326
32327+static net_device_ops_no_const apdev_netdev_ops;
32328+
32329 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
32330 {
32331 PSDevice apdev_priv;
32332 struct net_device *dev = pDevice->dev;
32333 int ret;
32334- const struct net_device_ops apdev_netdev_ops = {
32335- .ndo_start_xmit = pDevice->tx_80211,
32336- };
32337
32338 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
32339
32340@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevic
32341 *apdev_priv = *pDevice;
32342 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
32343
32344+ /* only half broken now */
32345+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
32346 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
32347
32348 pDevice->apdev->type = ARPHRD_IEEE80211;
32349diff -urNp linux-3.0.4/drivers/staging/vt6656/hostap.c linux-3.0.4/drivers/staging/vt6656/hostap.c
32350--- linux-3.0.4/drivers/staging/vt6656/hostap.c 2011-07-21 22:17:23.000000000 -0400
32351+++ linux-3.0.4/drivers/staging/vt6656/hostap.c 2011-08-23 21:47:56.000000000 -0400
32352@@ -80,14 +80,13 @@ static int msglevel
32353 *
32354 */
32355
32356+static net_device_ops_no_const apdev_netdev_ops;
32357+
32358 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
32359 {
32360 PSDevice apdev_priv;
32361 struct net_device *dev = pDevice->dev;
32362 int ret;
32363- const struct net_device_ops apdev_netdev_ops = {
32364- .ndo_start_xmit = pDevice->tx_80211,
32365- };
32366
32367 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
32368
32369@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevic
32370 *apdev_priv = *pDevice;
32371 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
32372
32373+ /* only half broken now */
32374+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
32375 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
32376
32377 pDevice->apdev->type = ARPHRD_IEEE80211;
32378diff -urNp linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c
32379--- linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c 2011-07-21 22:17:23.000000000 -0400
32380+++ linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c 2011-08-23 21:47:56.000000000 -0400
32381@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hf
32382
32383 struct usbctlx_completor {
32384 int (*complete) (struct usbctlx_completor *);
32385-};
32386+} __no_const;
32387
32388 static int
32389 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
32390diff -urNp linux-3.0.4/drivers/staging/zcache/tmem.c linux-3.0.4/drivers/staging/zcache/tmem.c
32391--- linux-3.0.4/drivers/staging/zcache/tmem.c 2011-07-21 22:17:23.000000000 -0400
32392+++ linux-3.0.4/drivers/staging/zcache/tmem.c 2011-08-23 21:47:56.000000000 -0400
32393@@ -39,7 +39,7 @@
32394 * A tmem host implementation must use this function to register callbacks
32395 * for memory allocation.
32396 */
32397-static struct tmem_hostops tmem_hostops;
32398+static tmem_hostops_no_const tmem_hostops;
32399
32400 static void tmem_objnode_tree_init(void);
32401
32402@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_h
32403 * A tmem host implementation must use this function to register
32404 * callbacks for a page-accessible memory (PAM) implementation
32405 */
32406-static struct tmem_pamops tmem_pamops;
32407+static tmem_pamops_no_const tmem_pamops;
32408
32409 void tmem_register_pamops(struct tmem_pamops *m)
32410 {
32411diff -urNp linux-3.0.4/drivers/staging/zcache/tmem.h linux-3.0.4/drivers/staging/zcache/tmem.h
32412--- linux-3.0.4/drivers/staging/zcache/tmem.h 2011-07-21 22:17:23.000000000 -0400
32413+++ linux-3.0.4/drivers/staging/zcache/tmem.h 2011-08-23 21:47:56.000000000 -0400
32414@@ -171,6 +171,7 @@ struct tmem_pamops {
32415 int (*get_data)(struct page *, void *, struct tmem_pool *);
32416 void (*free)(void *, struct tmem_pool *);
32417 };
32418+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
32419 extern void tmem_register_pamops(struct tmem_pamops *m);
32420
32421 /* memory allocation methods provided by the host implementation */
32422@@ -180,6 +181,7 @@ struct tmem_hostops {
32423 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
32424 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
32425 };
32426+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
32427 extern void tmem_register_hostops(struct tmem_hostops *m);
32428
32429 /* core tmem accessor functions */
32430diff -urNp linux-3.0.4/drivers/target/target_core_alua.c linux-3.0.4/drivers/target/target_core_alua.c
32431--- linux-3.0.4/drivers/target/target_core_alua.c 2011-07-21 22:17:23.000000000 -0400
32432+++ linux-3.0.4/drivers/target/target_core_alua.c 2011-08-23 21:48:14.000000000 -0400
32433@@ -675,6 +675,8 @@ static int core_alua_update_tpg_primary_
32434 char path[ALUA_METADATA_PATH_LEN];
32435 int len;
32436
32437+ pax_track_stack();
32438+
32439 memset(path, 0, ALUA_METADATA_PATH_LEN);
32440
32441 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
32442@@ -938,6 +940,8 @@ static int core_alua_update_tpg_secondar
32443 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
32444 int len;
32445
32446+ pax_track_stack();
32447+
32448 memset(path, 0, ALUA_METADATA_PATH_LEN);
32449 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
32450
32451diff -urNp linux-3.0.4/drivers/target/target_core_cdb.c linux-3.0.4/drivers/target/target_core_cdb.c
32452--- linux-3.0.4/drivers/target/target_core_cdb.c 2011-07-21 22:17:23.000000000 -0400
32453+++ linux-3.0.4/drivers/target/target_core_cdb.c 2011-08-23 21:48:14.000000000 -0400
32454@@ -838,6 +838,8 @@ target_emulate_modesense(struct se_cmd *
32455 int length = 0;
32456 unsigned char buf[SE_MODE_PAGE_BUF];
32457
32458+ pax_track_stack();
32459+
32460 memset(buf, 0, SE_MODE_PAGE_BUF);
32461
32462 switch (cdb[2] & 0x3f) {
32463diff -urNp linux-3.0.4/drivers/target/target_core_configfs.c linux-3.0.4/drivers/target/target_core_configfs.c
32464--- linux-3.0.4/drivers/target/target_core_configfs.c 2011-07-21 22:17:23.000000000 -0400
32465+++ linux-3.0.4/drivers/target/target_core_configfs.c 2011-08-23 21:48:14.000000000 -0400
32466@@ -1276,6 +1276,8 @@ static ssize_t target_core_dev_pr_show_a
32467 ssize_t len = 0;
32468 int reg_count = 0, prf_isid;
32469
32470+ pax_track_stack();
32471+
32472 if (!(su_dev->se_dev_ptr))
32473 return -ENODEV;
32474
32475diff -urNp linux-3.0.4/drivers/target/target_core_pr.c linux-3.0.4/drivers/target/target_core_pr.c
32476--- linux-3.0.4/drivers/target/target_core_pr.c 2011-07-21 22:17:23.000000000 -0400
32477+++ linux-3.0.4/drivers/target/target_core_pr.c 2011-08-23 21:48:14.000000000 -0400
32478@@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_regi
32479 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
32480 u16 tpgt;
32481
32482+ pax_track_stack();
32483+
32484 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
32485 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
32486 /*
32487@@ -1861,6 +1863,8 @@ static int __core_scsi3_update_aptpl_buf
32488 ssize_t len = 0;
32489 int reg_count = 0;
32490
32491+ pax_track_stack();
32492+
32493 memset(buf, 0, pr_aptpl_buf_len);
32494 /*
32495 * Called to clear metadata once APTPL has been deactivated.
32496@@ -1983,6 +1987,8 @@ static int __core_scsi3_write_aptpl_to_f
32497 char path[512];
32498 int ret;
32499
32500+ pax_track_stack();
32501+
32502 memset(iov, 0, sizeof(struct iovec));
32503 memset(path, 0, 512);
32504
32505diff -urNp linux-3.0.4/drivers/target/target_core_tmr.c linux-3.0.4/drivers/target/target_core_tmr.c
32506--- linux-3.0.4/drivers/target/target_core_tmr.c 2011-07-21 22:17:23.000000000 -0400
32507+++ linux-3.0.4/drivers/target/target_core_tmr.c 2011-08-23 21:47:56.000000000 -0400
32508@@ -269,7 +269,7 @@ int core_tmr_lun_reset(
32509 CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
32510 T_TASK(cmd)->t_task_cdbs,
32511 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32512- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32513+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32514 atomic_read(&T_TASK(cmd)->t_transport_active),
32515 atomic_read(&T_TASK(cmd)->t_transport_stop),
32516 atomic_read(&T_TASK(cmd)->t_transport_sent));
32517@@ -311,7 +311,7 @@ int core_tmr_lun_reset(
32518 DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
32519 " task: %p, t_fe_count: %d dev: %p\n", task,
32520 fe_count, dev);
32521- atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
32522+ atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
32523 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
32524 flags);
32525 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
32526@@ -321,7 +321,7 @@ int core_tmr_lun_reset(
32527 }
32528 DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
32529 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
32530- atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
32531+ atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
32532 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
32533 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
32534
32535diff -urNp linux-3.0.4/drivers/target/target_core_transport.c linux-3.0.4/drivers/target/target_core_transport.c
32536--- linux-3.0.4/drivers/target/target_core_transport.c 2011-07-21 22:17:23.000000000 -0400
32537+++ linux-3.0.4/drivers/target/target_core_transport.c 2011-08-23 21:47:56.000000000 -0400
32538@@ -1681,7 +1681,7 @@ struct se_device *transport_add_device_t
32539
32540 dev->queue_depth = dev_limits->queue_depth;
32541 atomic_set(&dev->depth_left, dev->queue_depth);
32542- atomic_set(&dev->dev_ordered_id, 0);
32543+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
32544
32545 se_dev_set_default_attribs(dev, dev_limits);
32546
32547@@ -1882,7 +1882,7 @@ static int transport_check_alloc_task_at
32548 * Used to determine when ORDERED commands should go from
32549 * Dormant to Active status.
32550 */
32551- cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
32552+ cmd->se_ordered_id = atomic_inc_return_unchecked(&SE_DEV(cmd)->dev_ordered_id);
32553 smp_mb__after_atomic_inc();
32554 DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
32555 cmd->se_ordered_id, cmd->sam_task_attr,
32556@@ -2169,7 +2169,7 @@ static void transport_generic_request_fa
32557 " t_transport_active: %d t_transport_stop: %d"
32558 " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
32559 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32560- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32561+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32562 atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
32563 atomic_read(&T_TASK(cmd)->t_transport_active),
32564 atomic_read(&T_TASK(cmd)->t_transport_stop),
32565@@ -2673,9 +2673,9 @@ check_depth:
32566 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
32567 atomic_set(&task->task_active, 1);
32568 atomic_set(&task->task_sent, 1);
32569- atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
32570+ atomic_inc_unchecked(&T_TASK(cmd)->t_task_cdbs_sent);
32571
32572- if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
32573+ if (atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent) ==
32574 T_TASK(cmd)->t_task_cdbs)
32575 atomic_set(&cmd->transport_sent, 1);
32576
32577@@ -5568,7 +5568,7 @@ static void transport_generic_wait_for_t
32578 atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
32579 }
32580 if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
32581- atomic_read(&T_TASK(cmd)->t_transport_aborted))
32582+ atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted))
32583 goto remove;
32584
32585 atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
32586@@ -5797,7 +5797,7 @@ int transport_check_aborted_status(struc
32587 {
32588 int ret = 0;
32589
32590- if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
32591+ if (atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted) != 0) {
32592 if (!(send_status) ||
32593 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
32594 return 1;
32595@@ -5825,7 +5825,7 @@ void transport_send_task_abort(struct se
32596 */
32597 if (cmd->data_direction == DMA_TO_DEVICE) {
32598 if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
32599- atomic_inc(&T_TASK(cmd)->t_transport_aborted);
32600+ atomic_inc_unchecked(&T_TASK(cmd)->t_transport_aborted);
32601 smp_mb__after_atomic_inc();
32602 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
32603 transport_new_cmd_failure(cmd);
32604@@ -5949,7 +5949,7 @@ static void transport_processing_shutdow
32605 CMD_TFO(cmd)->get_task_tag(cmd),
32606 T_TASK(cmd)->t_task_cdbs,
32607 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32608- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32609+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32610 atomic_read(&T_TASK(cmd)->t_transport_active),
32611 atomic_read(&T_TASK(cmd)->t_transport_stop),
32612 atomic_read(&T_TASK(cmd)->t_transport_sent));
32613diff -urNp linux-3.0.4/drivers/telephony/ixj.c linux-3.0.4/drivers/telephony/ixj.c
32614--- linux-3.0.4/drivers/telephony/ixj.c 2011-07-21 22:17:23.000000000 -0400
32615+++ linux-3.0.4/drivers/telephony/ixj.c 2011-08-23 21:48:14.000000000 -0400
32616@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
32617 bool mContinue;
32618 char *pIn, *pOut;
32619
32620+ pax_track_stack();
32621+
32622 if (!SCI_Prepare(j))
32623 return 0;
32624
32625diff -urNp linux-3.0.4/drivers/tty/hvc/hvcs.c linux-3.0.4/drivers/tty/hvc/hvcs.c
32626--- linux-3.0.4/drivers/tty/hvc/hvcs.c 2011-07-21 22:17:23.000000000 -0400
32627+++ linux-3.0.4/drivers/tty/hvc/hvcs.c 2011-08-23 21:47:56.000000000 -0400
32628@@ -83,6 +83,7 @@
32629 #include <asm/hvcserver.h>
32630 #include <asm/uaccess.h>
32631 #include <asm/vio.h>
32632+#include <asm/local.h>
32633
32634 /*
32635 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
32636@@ -270,7 +271,7 @@ struct hvcs_struct {
32637 unsigned int index;
32638
32639 struct tty_struct *tty;
32640- int open_count;
32641+ local_t open_count;
32642
32643 /*
32644 * Used to tell the driver kernel_thread what operations need to take
32645@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(st
32646
32647 spin_lock_irqsave(&hvcsd->lock, flags);
32648
32649- if (hvcsd->open_count > 0) {
32650+ if (local_read(&hvcsd->open_count) > 0) {
32651 spin_unlock_irqrestore(&hvcsd->lock, flags);
32652 printk(KERN_INFO "HVCS: vterm state unchanged. "
32653 "The hvcs device node is still in use.\n");
32654@@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *
32655 if ((retval = hvcs_partner_connect(hvcsd)))
32656 goto error_release;
32657
32658- hvcsd->open_count = 1;
32659+ local_set(&hvcsd->open_count, 1);
32660 hvcsd->tty = tty;
32661 tty->driver_data = hvcsd;
32662
32663@@ -1179,7 +1180,7 @@ fast_open:
32664
32665 spin_lock_irqsave(&hvcsd->lock, flags);
32666 kref_get(&hvcsd->kref);
32667- hvcsd->open_count++;
32668+ local_inc(&hvcsd->open_count);
32669 hvcsd->todo_mask |= HVCS_SCHED_READ;
32670 spin_unlock_irqrestore(&hvcsd->lock, flags);
32671
32672@@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct
32673 hvcsd = tty->driver_data;
32674
32675 spin_lock_irqsave(&hvcsd->lock, flags);
32676- if (--hvcsd->open_count == 0) {
32677+ if (local_dec_and_test(&hvcsd->open_count)) {
32678
32679 vio_disable_interrupts(hvcsd->vdev);
32680
32681@@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct
32682 free_irq(irq, hvcsd);
32683 kref_put(&hvcsd->kref, destroy_hvcs_struct);
32684 return;
32685- } else if (hvcsd->open_count < 0) {
32686+ } else if (local_read(&hvcsd->open_count) < 0) {
32687 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
32688 " is missmanaged.\n",
32689- hvcsd->vdev->unit_address, hvcsd->open_count);
32690+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
32691 }
32692
32693 spin_unlock_irqrestore(&hvcsd->lock, flags);
32694@@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struc
32695
32696 spin_lock_irqsave(&hvcsd->lock, flags);
32697 /* Preserve this so that we know how many kref refs to put */
32698- temp_open_count = hvcsd->open_count;
32699+ temp_open_count = local_read(&hvcsd->open_count);
32700
32701 /*
32702 * Don't kref put inside the spinlock because the destruction
32703@@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struc
32704 hvcsd->tty->driver_data = NULL;
32705 hvcsd->tty = NULL;
32706
32707- hvcsd->open_count = 0;
32708+ local_set(&hvcsd->open_count, 0);
32709
32710 /* This will drop any buffered data on the floor which is OK in a hangup
32711 * scenario. */
32712@@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct
32713 * the middle of a write operation? This is a crummy place to do this
32714 * but we want to keep it all in the spinlock.
32715 */
32716- if (hvcsd->open_count <= 0) {
32717+ if (local_read(&hvcsd->open_count) <= 0) {
32718 spin_unlock_irqrestore(&hvcsd->lock, flags);
32719 return -ENODEV;
32720 }
32721@@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_st
32722 {
32723 struct hvcs_struct *hvcsd = tty->driver_data;
32724
32725- if (!hvcsd || hvcsd->open_count <= 0)
32726+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
32727 return 0;
32728
32729 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
32730diff -urNp linux-3.0.4/drivers/tty/ipwireless/tty.c linux-3.0.4/drivers/tty/ipwireless/tty.c
32731--- linux-3.0.4/drivers/tty/ipwireless/tty.c 2011-07-21 22:17:23.000000000 -0400
32732+++ linux-3.0.4/drivers/tty/ipwireless/tty.c 2011-08-23 21:47:56.000000000 -0400
32733@@ -29,6 +29,7 @@
32734 #include <linux/tty_driver.h>
32735 #include <linux/tty_flip.h>
32736 #include <linux/uaccess.h>
32737+#include <asm/local.h>
32738
32739 #include "tty.h"
32740 #include "network.h"
32741@@ -51,7 +52,7 @@ struct ipw_tty {
32742 int tty_type;
32743 struct ipw_network *network;
32744 struct tty_struct *linux_tty;
32745- int open_count;
32746+ local_t open_count;
32747 unsigned int control_lines;
32748 struct mutex ipw_tty_mutex;
32749 int tx_bytes_queued;
32750@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
32751 mutex_unlock(&tty->ipw_tty_mutex);
32752 return -ENODEV;
32753 }
32754- if (tty->open_count == 0)
32755+ if (local_read(&tty->open_count) == 0)
32756 tty->tx_bytes_queued = 0;
32757
32758- tty->open_count++;
32759+ local_inc(&tty->open_count);
32760
32761 tty->linux_tty = linux_tty;
32762 linux_tty->driver_data = tty;
32763@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
32764
32765 static void do_ipw_close(struct ipw_tty *tty)
32766 {
32767- tty->open_count--;
32768-
32769- if (tty->open_count == 0) {
32770+ if (local_dec_return(&tty->open_count) == 0) {
32771 struct tty_struct *linux_tty = tty->linux_tty;
32772
32773 if (linux_tty != NULL) {
32774@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
32775 return;
32776
32777 mutex_lock(&tty->ipw_tty_mutex);
32778- if (tty->open_count == 0) {
32779+ if (local_read(&tty->open_count) == 0) {
32780 mutex_unlock(&tty->ipw_tty_mutex);
32781 return;
32782 }
32783@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
32784 return;
32785 }
32786
32787- if (!tty->open_count) {
32788+ if (!local_read(&tty->open_count)) {
32789 mutex_unlock(&tty->ipw_tty_mutex);
32790 return;
32791 }
32792@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
32793 return -ENODEV;
32794
32795 mutex_lock(&tty->ipw_tty_mutex);
32796- if (!tty->open_count) {
32797+ if (!local_read(&tty->open_count)) {
32798 mutex_unlock(&tty->ipw_tty_mutex);
32799 return -EINVAL;
32800 }
32801@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
32802 if (!tty)
32803 return -ENODEV;
32804
32805- if (!tty->open_count)
32806+ if (!local_read(&tty->open_count))
32807 return -EINVAL;
32808
32809 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
32810@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
32811 if (!tty)
32812 return 0;
32813
32814- if (!tty->open_count)
32815+ if (!local_read(&tty->open_count))
32816 return 0;
32817
32818 return tty->tx_bytes_queued;
32819@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
32820 if (!tty)
32821 return -ENODEV;
32822
32823- if (!tty->open_count)
32824+ if (!local_read(&tty->open_count))
32825 return -EINVAL;
32826
32827 return get_control_lines(tty);
32828@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
32829 if (!tty)
32830 return -ENODEV;
32831
32832- if (!tty->open_count)
32833+ if (!local_read(&tty->open_count))
32834 return -EINVAL;
32835
32836 return set_control_lines(tty, set, clear);
32837@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
32838 if (!tty)
32839 return -ENODEV;
32840
32841- if (!tty->open_count)
32842+ if (!local_read(&tty->open_count))
32843 return -EINVAL;
32844
32845 /* FIXME: Exactly how is the tty object locked here .. */
32846@@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty
32847 against a parallel ioctl etc */
32848 mutex_lock(&ttyj->ipw_tty_mutex);
32849 }
32850- while (ttyj->open_count)
32851+ while (local_read(&ttyj->open_count))
32852 do_ipw_close(ttyj);
32853 ipwireless_disassociate_network_ttys(network,
32854 ttyj->channel_idx);
32855diff -urNp linux-3.0.4/drivers/tty/n_gsm.c linux-3.0.4/drivers/tty/n_gsm.c
32856--- linux-3.0.4/drivers/tty/n_gsm.c 2011-09-02 18:11:21.000000000 -0400
32857+++ linux-3.0.4/drivers/tty/n_gsm.c 2011-08-23 21:47:56.000000000 -0400
32858@@ -1589,7 +1589,7 @@ static struct gsm_dlci *gsm_dlci_alloc(s
32859 return NULL;
32860 spin_lock_init(&dlci->lock);
32861 dlci->fifo = &dlci->_fifo;
32862- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
32863+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
32864 kfree(dlci);
32865 return NULL;
32866 }
32867diff -urNp linux-3.0.4/drivers/tty/n_tty.c linux-3.0.4/drivers/tty/n_tty.c
32868--- linux-3.0.4/drivers/tty/n_tty.c 2011-07-21 22:17:23.000000000 -0400
32869+++ linux-3.0.4/drivers/tty/n_tty.c 2011-08-23 21:47:56.000000000 -0400
32870@@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
32871 {
32872 *ops = tty_ldisc_N_TTY;
32873 ops->owner = NULL;
32874- ops->refcount = ops->flags = 0;
32875+ atomic_set(&ops->refcount, 0);
32876+ ops->flags = 0;
32877 }
32878 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
32879diff -urNp linux-3.0.4/drivers/tty/pty.c linux-3.0.4/drivers/tty/pty.c
32880--- linux-3.0.4/drivers/tty/pty.c 2011-07-21 22:17:23.000000000 -0400
32881+++ linux-3.0.4/drivers/tty/pty.c 2011-08-23 21:47:56.000000000 -0400
32882@@ -754,8 +754,10 @@ static void __init unix98_pty_init(void)
32883 register_sysctl_table(pty_root_table);
32884
32885 /* Now create the /dev/ptmx special device */
32886+ pax_open_kernel();
32887 tty_default_fops(&ptmx_fops);
32888- ptmx_fops.open = ptmx_open;
32889+ *(void **)&ptmx_fops.open = ptmx_open;
32890+ pax_close_kernel();
32891
32892 cdev_init(&ptmx_cdev, &ptmx_fops);
32893 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
32894diff -urNp linux-3.0.4/drivers/tty/rocket.c linux-3.0.4/drivers/tty/rocket.c
32895--- linux-3.0.4/drivers/tty/rocket.c 2011-07-21 22:17:23.000000000 -0400
32896+++ linux-3.0.4/drivers/tty/rocket.c 2011-08-23 21:48:14.000000000 -0400
32897@@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info
32898 struct rocket_ports tmp;
32899 int board;
32900
32901+ pax_track_stack();
32902+
32903 if (!retports)
32904 return -EFAULT;
32905 memset(&tmp, 0, sizeof (tmp));
32906diff -urNp linux-3.0.4/drivers/tty/serial/kgdboc.c linux-3.0.4/drivers/tty/serial/kgdboc.c
32907--- linux-3.0.4/drivers/tty/serial/kgdboc.c 2011-07-21 22:17:23.000000000 -0400
32908+++ linux-3.0.4/drivers/tty/serial/kgdboc.c 2011-08-23 21:47:56.000000000 -0400
32909@@ -23,8 +23,9 @@
32910 #define MAX_CONFIG_LEN 40
32911
32912 static struct kgdb_io kgdboc_io_ops;
32913+static struct kgdb_io kgdboc_io_ops_console;
32914
32915-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
32916+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
32917 static int configured = -1;
32918
32919 static char config[MAX_CONFIG_LEN];
32920@@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
32921 kgdboc_unregister_kbd();
32922 if (configured == 1)
32923 kgdb_unregister_io_module(&kgdboc_io_ops);
32924+ else if (configured == 2)
32925+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
32926 }
32927
32928 static int configure_kgdboc(void)
32929@@ -156,13 +159,13 @@ static int configure_kgdboc(void)
32930 int err;
32931 char *cptr = config;
32932 struct console *cons;
32933+ int is_console = 0;
32934
32935 err = kgdboc_option_setup(config);
32936 if (err || !strlen(config) || isspace(config[0]))
32937 goto noconfig;
32938
32939 err = -ENODEV;
32940- kgdboc_io_ops.is_console = 0;
32941 kgdb_tty_driver = NULL;
32942
32943 kgdboc_use_kms = 0;
32944@@ -183,7 +186,7 @@ static int configure_kgdboc(void)
32945 int idx;
32946 if (cons->device && cons->device(cons, &idx) == p &&
32947 idx == tty_line) {
32948- kgdboc_io_ops.is_console = 1;
32949+ is_console = 1;
32950 break;
32951 }
32952 cons = cons->next;
32953@@ -193,12 +196,16 @@ static int configure_kgdboc(void)
32954 kgdb_tty_line = tty_line;
32955
32956 do_register:
32957- err = kgdb_register_io_module(&kgdboc_io_ops);
32958+ if (is_console) {
32959+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
32960+ configured = 2;
32961+ } else {
32962+ err = kgdb_register_io_module(&kgdboc_io_ops);
32963+ configured = 1;
32964+ }
32965 if (err)
32966 goto noconfig;
32967
32968- configured = 1;
32969-
32970 return 0;
32971
32972 noconfig:
32973@@ -212,7 +219,7 @@ noconfig:
32974 static int __init init_kgdboc(void)
32975 {
32976 /* Already configured? */
32977- if (configured == 1)
32978+ if (configured >= 1)
32979 return 0;
32980
32981 return configure_kgdboc();
32982@@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const ch
32983 if (config[len - 1] == '\n')
32984 config[len - 1] = '\0';
32985
32986- if (configured == 1)
32987+ if (configured >= 1)
32988 cleanup_kgdboc();
32989
32990 /* Go and configure with the new params. */
32991@@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
32992 .post_exception = kgdboc_post_exp_handler,
32993 };
32994
32995+static struct kgdb_io kgdboc_io_ops_console = {
32996+ .name = "kgdboc",
32997+ .read_char = kgdboc_get_char,
32998+ .write_char = kgdboc_put_char,
32999+ .pre_exception = kgdboc_pre_exp_handler,
33000+ .post_exception = kgdboc_post_exp_handler,
33001+ .is_console = 1
33002+};
33003+
33004 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
33005 /* This is only available if kgdboc is a built in for early debugging */
33006 static int __init kgdboc_early_init(char *opt)
33007diff -urNp linux-3.0.4/drivers/tty/serial/mrst_max3110.c linux-3.0.4/drivers/tty/serial/mrst_max3110.c
33008--- linux-3.0.4/drivers/tty/serial/mrst_max3110.c 2011-07-21 22:17:23.000000000 -0400
33009+++ linux-3.0.4/drivers/tty/serial/mrst_max3110.c 2011-08-23 21:48:14.000000000 -0400
33010@@ -393,6 +393,8 @@ static void max3110_con_receive(struct u
33011 int loop = 1, num, total = 0;
33012 u8 recv_buf[512], *pbuf;
33013
33014+ pax_track_stack();
33015+
33016 pbuf = recv_buf;
33017 do {
33018 num = max3110_read_multi(max, pbuf);
33019diff -urNp linux-3.0.4/drivers/tty/tty_io.c linux-3.0.4/drivers/tty/tty_io.c
33020--- linux-3.0.4/drivers/tty/tty_io.c 2011-07-21 22:17:23.000000000 -0400
33021+++ linux-3.0.4/drivers/tty/tty_io.c 2011-08-23 21:47:56.000000000 -0400
33022@@ -3215,7 +3215,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
33023
33024 void tty_default_fops(struct file_operations *fops)
33025 {
33026- *fops = tty_fops;
33027+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
33028 }
33029
33030 /*
33031diff -urNp linux-3.0.4/drivers/tty/tty_ldisc.c linux-3.0.4/drivers/tty/tty_ldisc.c
33032--- linux-3.0.4/drivers/tty/tty_ldisc.c 2011-07-21 22:17:23.000000000 -0400
33033+++ linux-3.0.4/drivers/tty/tty_ldisc.c 2011-08-23 21:47:56.000000000 -0400
33034@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
33035 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
33036 struct tty_ldisc_ops *ldo = ld->ops;
33037
33038- ldo->refcount--;
33039+ atomic_dec(&ldo->refcount);
33040 module_put(ldo->owner);
33041 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33042
33043@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
33044 spin_lock_irqsave(&tty_ldisc_lock, flags);
33045 tty_ldiscs[disc] = new_ldisc;
33046 new_ldisc->num = disc;
33047- new_ldisc->refcount = 0;
33048+ atomic_set(&new_ldisc->refcount, 0);
33049 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33050
33051 return ret;
33052@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
33053 return -EINVAL;
33054
33055 spin_lock_irqsave(&tty_ldisc_lock, flags);
33056- if (tty_ldiscs[disc]->refcount)
33057+ if (atomic_read(&tty_ldiscs[disc]->refcount))
33058 ret = -EBUSY;
33059 else
33060 tty_ldiscs[disc] = NULL;
33061@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
33062 if (ldops) {
33063 ret = ERR_PTR(-EAGAIN);
33064 if (try_module_get(ldops->owner)) {
33065- ldops->refcount++;
33066+ atomic_inc(&ldops->refcount);
33067 ret = ldops;
33068 }
33069 }
33070@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
33071 unsigned long flags;
33072
33073 spin_lock_irqsave(&tty_ldisc_lock, flags);
33074- ldops->refcount--;
33075+ atomic_dec(&ldops->refcount);
33076 module_put(ldops->owner);
33077 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33078 }
33079diff -urNp linux-3.0.4/drivers/tty/vt/keyboard.c linux-3.0.4/drivers/tty/vt/keyboard.c
33080--- linux-3.0.4/drivers/tty/vt/keyboard.c 2011-07-21 22:17:23.000000000 -0400
33081+++ linux-3.0.4/drivers/tty/vt/keyboard.c 2011-08-23 21:48:14.000000000 -0400
33082@@ -656,6 +656,16 @@ static void k_spec(struct vc_data *vc, u
33083 kbd->kbdmode == VC_OFF) &&
33084 value != KVAL(K_SAK))
33085 return; /* SAK is allowed even in raw mode */
33086+
33087+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
33088+ {
33089+ void *func = fn_handler[value];
33090+ if (func == fn_show_state || func == fn_show_ptregs ||
33091+ func == fn_show_mem)
33092+ return;
33093+ }
33094+#endif
33095+
33096 fn_handler[value](vc);
33097 }
33098
33099diff -urNp linux-3.0.4/drivers/tty/vt/vt.c linux-3.0.4/drivers/tty/vt/vt.c
33100--- linux-3.0.4/drivers/tty/vt/vt.c 2011-07-21 22:17:23.000000000 -0400
33101+++ linux-3.0.4/drivers/tty/vt/vt.c 2011-08-23 21:47:56.000000000 -0400
33102@@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
33103
33104 static void notify_write(struct vc_data *vc, unsigned int unicode)
33105 {
33106- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
33107+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
33108 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
33109 }
33110
33111diff -urNp linux-3.0.4/drivers/tty/vt/vt_ioctl.c linux-3.0.4/drivers/tty/vt/vt_ioctl.c
33112--- linux-3.0.4/drivers/tty/vt/vt_ioctl.c 2011-07-21 22:17:23.000000000 -0400
33113+++ linux-3.0.4/drivers/tty/vt/vt_ioctl.c 2011-08-23 21:48:14.000000000 -0400
33114@@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33115 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
33116 return -EFAULT;
33117
33118- if (!capable(CAP_SYS_TTY_CONFIG))
33119- perm = 0;
33120-
33121 switch (cmd) {
33122 case KDGKBENT:
33123 key_map = key_maps[s];
33124@@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33125 val = (i ? K_HOLE : K_NOSUCHMAP);
33126 return put_user(val, &user_kbe->kb_value);
33127 case KDSKBENT:
33128+ if (!capable(CAP_SYS_TTY_CONFIG))
33129+ perm = 0;
33130+
33131 if (!perm)
33132 return -EPERM;
33133 if (!i && v == K_NOSUCHMAP) {
33134@@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
33135 int i, j, k;
33136 int ret;
33137
33138- if (!capable(CAP_SYS_TTY_CONFIG))
33139- perm = 0;
33140-
33141 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
33142 if (!kbs) {
33143 ret = -ENOMEM;
33144@@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
33145 kfree(kbs);
33146 return ((p && *p) ? -EOVERFLOW : 0);
33147 case KDSKBSENT:
33148+ if (!capable(CAP_SYS_TTY_CONFIG))
33149+ perm = 0;
33150+
33151 if (!perm) {
33152 ret = -EPERM;
33153 goto reterr;
33154diff -urNp linux-3.0.4/drivers/uio/uio.c linux-3.0.4/drivers/uio/uio.c
33155--- linux-3.0.4/drivers/uio/uio.c 2011-07-21 22:17:23.000000000 -0400
33156+++ linux-3.0.4/drivers/uio/uio.c 2011-08-23 21:47:56.000000000 -0400
33157@@ -25,6 +25,7 @@
33158 #include <linux/kobject.h>
33159 #include <linux/cdev.h>
33160 #include <linux/uio_driver.h>
33161+#include <asm/local.h>
33162
33163 #define UIO_MAX_DEVICES (1U << MINORBITS)
33164
33165@@ -32,10 +33,10 @@ struct uio_device {
33166 struct module *owner;
33167 struct device *dev;
33168 int minor;
33169- atomic_t event;
33170+ atomic_unchecked_t event;
33171 struct fasync_struct *async_queue;
33172 wait_queue_head_t wait;
33173- int vma_count;
33174+ local_t vma_count;
33175 struct uio_info *info;
33176 struct kobject *map_dir;
33177 struct kobject *portio_dir;
33178@@ -242,7 +243,7 @@ static ssize_t show_event(struct device
33179 struct device_attribute *attr, char *buf)
33180 {
33181 struct uio_device *idev = dev_get_drvdata(dev);
33182- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
33183+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
33184 }
33185
33186 static struct device_attribute uio_class_attributes[] = {
33187@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *i
33188 {
33189 struct uio_device *idev = info->uio_dev;
33190
33191- atomic_inc(&idev->event);
33192+ atomic_inc_unchecked(&idev->event);
33193 wake_up_interruptible(&idev->wait);
33194 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
33195 }
33196@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode,
33197 }
33198
33199 listener->dev = idev;
33200- listener->event_count = atomic_read(&idev->event);
33201+ listener->event_count = atomic_read_unchecked(&idev->event);
33202 filep->private_data = listener;
33203
33204 if (idev->info->open) {
33205@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file
33206 return -EIO;
33207
33208 poll_wait(filep, &idev->wait, wait);
33209- if (listener->event_count != atomic_read(&idev->event))
33210+ if (listener->event_count != atomic_read_unchecked(&idev->event))
33211 return POLLIN | POLLRDNORM;
33212 return 0;
33213 }
33214@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *fil
33215 do {
33216 set_current_state(TASK_INTERRUPTIBLE);
33217
33218- event_count = atomic_read(&idev->event);
33219+ event_count = atomic_read_unchecked(&idev->event);
33220 if (event_count != listener->event_count) {
33221 if (copy_to_user(buf, &event_count, count))
33222 retval = -EFAULT;
33223@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_
33224 static void uio_vma_open(struct vm_area_struct *vma)
33225 {
33226 struct uio_device *idev = vma->vm_private_data;
33227- idev->vma_count++;
33228+ local_inc(&idev->vma_count);
33229 }
33230
33231 static void uio_vma_close(struct vm_area_struct *vma)
33232 {
33233 struct uio_device *idev = vma->vm_private_data;
33234- idev->vma_count--;
33235+ local_dec(&idev->vma_count);
33236 }
33237
33238 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33239@@ -823,7 +824,7 @@ int __uio_register_device(struct module
33240 idev->owner = owner;
33241 idev->info = info;
33242 init_waitqueue_head(&idev->wait);
33243- atomic_set(&idev->event, 0);
33244+ atomic_set_unchecked(&idev->event, 0);
33245
33246 ret = uio_get_minor(idev);
33247 if (ret)
33248diff -urNp linux-3.0.4/drivers/usb/atm/cxacru.c linux-3.0.4/drivers/usb/atm/cxacru.c
33249--- linux-3.0.4/drivers/usb/atm/cxacru.c 2011-07-21 22:17:23.000000000 -0400
33250+++ linux-3.0.4/drivers/usb/atm/cxacru.c 2011-08-23 21:47:56.000000000 -0400
33251@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_c
33252 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
33253 if (ret < 2)
33254 return -EINVAL;
33255- if (index < 0 || index > 0x7f)
33256+ if (index > 0x7f)
33257 return -EINVAL;
33258 pos += tmp;
33259
33260diff -urNp linux-3.0.4/drivers/usb/atm/usbatm.c linux-3.0.4/drivers/usb/atm/usbatm.c
33261--- linux-3.0.4/drivers/usb/atm/usbatm.c 2011-07-21 22:17:23.000000000 -0400
33262+++ linux-3.0.4/drivers/usb/atm/usbatm.c 2011-08-23 21:47:56.000000000 -0400
33263@@ -332,7 +332,7 @@ static void usbatm_extract_one_cell(stru
33264 if (printk_ratelimit())
33265 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
33266 __func__, vpi, vci);
33267- atomic_inc(&vcc->stats->rx_err);
33268+ atomic_inc_unchecked(&vcc->stats->rx_err);
33269 return;
33270 }
33271
33272@@ -360,7 +360,7 @@ static void usbatm_extract_one_cell(stru
33273 if (length > ATM_MAX_AAL5_PDU) {
33274 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
33275 __func__, length, vcc);
33276- atomic_inc(&vcc->stats->rx_err);
33277+ atomic_inc_unchecked(&vcc->stats->rx_err);
33278 goto out;
33279 }
33280
33281@@ -369,14 +369,14 @@ static void usbatm_extract_one_cell(stru
33282 if (sarb->len < pdu_length) {
33283 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
33284 __func__, pdu_length, sarb->len, vcc);
33285- atomic_inc(&vcc->stats->rx_err);
33286+ atomic_inc_unchecked(&vcc->stats->rx_err);
33287 goto out;
33288 }
33289
33290 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
33291 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
33292 __func__, vcc);
33293- atomic_inc(&vcc->stats->rx_err);
33294+ atomic_inc_unchecked(&vcc->stats->rx_err);
33295 goto out;
33296 }
33297
33298@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(stru
33299 if (printk_ratelimit())
33300 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
33301 __func__, length);
33302- atomic_inc(&vcc->stats->rx_drop);
33303+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33304 goto out;
33305 }
33306
33307@@ -411,7 +411,7 @@ static void usbatm_extract_one_cell(stru
33308
33309 vcc->push(vcc, skb);
33310
33311- atomic_inc(&vcc->stats->rx);
33312+ atomic_inc_unchecked(&vcc->stats->rx);
33313 out:
33314 skb_trim(sarb, 0);
33315 }
33316@@ -614,7 +614,7 @@ static void usbatm_tx_process(unsigned l
33317 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
33318
33319 usbatm_pop(vcc, skb);
33320- atomic_inc(&vcc->stats->tx);
33321+ atomic_inc_unchecked(&vcc->stats->tx);
33322
33323 skb = skb_dequeue(&instance->sndqueue);
33324 }
33325@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct a
33326 if (!left--)
33327 return sprintf(page,
33328 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
33329- atomic_read(&atm_dev->stats.aal5.tx),
33330- atomic_read(&atm_dev->stats.aal5.tx_err),
33331- atomic_read(&atm_dev->stats.aal5.rx),
33332- atomic_read(&atm_dev->stats.aal5.rx_err),
33333- atomic_read(&atm_dev->stats.aal5.rx_drop));
33334+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
33335+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
33336+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
33337+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
33338+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
33339
33340 if (!left--) {
33341 if (instance->disconnected)
33342diff -urNp linux-3.0.4/drivers/usb/core/devices.c linux-3.0.4/drivers/usb/core/devices.c
33343--- linux-3.0.4/drivers/usb/core/devices.c 2011-07-21 22:17:23.000000000 -0400
33344+++ linux-3.0.4/drivers/usb/core/devices.c 2011-08-23 21:47:56.000000000 -0400
33345@@ -126,7 +126,7 @@ static const char format_endpt[] =
33346 * time it gets called.
33347 */
33348 static struct device_connect_event {
33349- atomic_t count;
33350+ atomic_unchecked_t count;
33351 wait_queue_head_t wait;
33352 } device_event = {
33353 .count = ATOMIC_INIT(1),
33354@@ -164,7 +164,7 @@ static const struct class_info clas_info
33355
33356 void usbfs_conn_disc_event(void)
33357 {
33358- atomic_add(2, &device_event.count);
33359+ atomic_add_unchecked(2, &device_event.count);
33360 wake_up(&device_event.wait);
33361 }
33362
33363@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(stru
33364
33365 poll_wait(file, &device_event.wait, wait);
33366
33367- event_count = atomic_read(&device_event.count);
33368+ event_count = atomic_read_unchecked(&device_event.count);
33369 if (file->f_version != event_count) {
33370 file->f_version = event_count;
33371 return POLLIN | POLLRDNORM;
33372diff -urNp linux-3.0.4/drivers/usb/core/message.c linux-3.0.4/drivers/usb/core/message.c
33373--- linux-3.0.4/drivers/usb/core/message.c 2011-07-21 22:17:23.000000000 -0400
33374+++ linux-3.0.4/drivers/usb/core/message.c 2011-08-23 21:47:56.000000000 -0400
33375@@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device
33376 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
33377 if (buf) {
33378 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
33379- if (len > 0) {
33380- smallbuf = kmalloc(++len, GFP_NOIO);
33381+ if (len++ > 0) {
33382+ smallbuf = kmalloc(len, GFP_NOIO);
33383 if (!smallbuf)
33384 return buf;
33385 memcpy(smallbuf, buf, len);
33386diff -urNp linux-3.0.4/drivers/usb/early/ehci-dbgp.c linux-3.0.4/drivers/usb/early/ehci-dbgp.c
33387--- linux-3.0.4/drivers/usb/early/ehci-dbgp.c 2011-07-21 22:17:23.000000000 -0400
33388+++ linux-3.0.4/drivers/usb/early/ehci-dbgp.c 2011-08-23 21:47:56.000000000 -0400
33389@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x,
33390
33391 #ifdef CONFIG_KGDB
33392 static struct kgdb_io kgdbdbgp_io_ops;
33393-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
33394+static struct kgdb_io kgdbdbgp_io_ops_console;
33395+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
33396 #else
33397 #define dbgp_kgdb_mode (0)
33398 #endif
33399@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops =
33400 .write_char = kgdbdbgp_write_char,
33401 };
33402
33403+static struct kgdb_io kgdbdbgp_io_ops_console = {
33404+ .name = "kgdbdbgp",
33405+ .read_char = kgdbdbgp_read_char,
33406+ .write_char = kgdbdbgp_write_char,
33407+ .is_console = 1
33408+};
33409+
33410 static int kgdbdbgp_wait_time;
33411
33412 static int __init kgdbdbgp_parse_config(char *str)
33413@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(
33414 ptr++;
33415 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
33416 }
33417- kgdb_register_io_module(&kgdbdbgp_io_ops);
33418- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
33419+ if (early_dbgp_console.index != -1)
33420+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
33421+ else
33422+ kgdb_register_io_module(&kgdbdbgp_io_ops);
33423
33424 return 0;
33425 }
33426diff -urNp linux-3.0.4/drivers/usb/host/xhci-mem.c linux-3.0.4/drivers/usb/host/xhci-mem.c
33427--- linux-3.0.4/drivers/usb/host/xhci-mem.c 2011-07-21 22:17:23.000000000 -0400
33428+++ linux-3.0.4/drivers/usb/host/xhci-mem.c 2011-08-23 21:48:14.000000000 -0400
33429@@ -1685,6 +1685,8 @@ static int xhci_check_trb_in_td_math(str
33430 unsigned int num_tests;
33431 int i, ret;
33432
33433+ pax_track_stack();
33434+
33435 num_tests = ARRAY_SIZE(simple_test_vector);
33436 for (i = 0; i < num_tests; i++) {
33437 ret = xhci_test_trb_in_td(xhci,
33438diff -urNp linux-3.0.4/drivers/usb/wusbcore/wa-hc.h linux-3.0.4/drivers/usb/wusbcore/wa-hc.h
33439--- linux-3.0.4/drivers/usb/wusbcore/wa-hc.h 2011-07-21 22:17:23.000000000 -0400
33440+++ linux-3.0.4/drivers/usb/wusbcore/wa-hc.h 2011-08-23 21:47:56.000000000 -0400
33441@@ -192,7 +192,7 @@ struct wahc {
33442 struct list_head xfer_delayed_list;
33443 spinlock_t xfer_list_lock;
33444 struct work_struct xfer_work;
33445- atomic_t xfer_id_count;
33446+ atomic_unchecked_t xfer_id_count;
33447 };
33448
33449
33450@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
33451 INIT_LIST_HEAD(&wa->xfer_delayed_list);
33452 spin_lock_init(&wa->xfer_list_lock);
33453 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
33454- atomic_set(&wa->xfer_id_count, 1);
33455+ atomic_set_unchecked(&wa->xfer_id_count, 1);
33456 }
33457
33458 /**
33459diff -urNp linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c
33460--- linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c 2011-07-21 22:17:23.000000000 -0400
33461+++ linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c 2011-08-23 21:47:56.000000000 -0400
33462@@ -294,7 +294,7 @@ out:
33463 */
33464 static void wa_xfer_id_init(struct wa_xfer *xfer)
33465 {
33466- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
33467+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
33468 }
33469
33470 /*
33471diff -urNp linux-3.0.4/drivers/vhost/vhost.c linux-3.0.4/drivers/vhost/vhost.c
33472--- linux-3.0.4/drivers/vhost/vhost.c 2011-07-21 22:17:23.000000000 -0400
33473+++ linux-3.0.4/drivers/vhost/vhost.c 2011-08-23 21:47:56.000000000 -0400
33474@@ -589,7 +589,7 @@ static int init_used(struct vhost_virtqu
33475 return get_user(vq->last_used_idx, &used->idx);
33476 }
33477
33478-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
33479+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
33480 {
33481 struct file *eventfp, *filep = NULL,
33482 *pollstart = NULL, *pollstop = NULL;
33483diff -urNp linux-3.0.4/drivers/video/fbcmap.c linux-3.0.4/drivers/video/fbcmap.c
33484--- linux-3.0.4/drivers/video/fbcmap.c 2011-07-21 22:17:23.000000000 -0400
33485+++ linux-3.0.4/drivers/video/fbcmap.c 2011-08-23 21:47:56.000000000 -0400
33486@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user
33487 rc = -ENODEV;
33488 goto out;
33489 }
33490- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
33491- !info->fbops->fb_setcmap)) {
33492+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
33493 rc = -EINVAL;
33494 goto out1;
33495 }
33496diff -urNp linux-3.0.4/drivers/video/fbmem.c linux-3.0.4/drivers/video/fbmem.c
33497--- linux-3.0.4/drivers/video/fbmem.c 2011-07-21 22:17:23.000000000 -0400
33498+++ linux-3.0.4/drivers/video/fbmem.c 2011-08-23 21:48:14.000000000 -0400
33499@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_in
33500 image->dx += image->width + 8;
33501 }
33502 } else if (rotate == FB_ROTATE_UD) {
33503- for (x = 0; x < num && image->dx >= 0; x++) {
33504+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
33505 info->fbops->fb_imageblit(info, image);
33506 image->dx -= image->width + 8;
33507 }
33508@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_in
33509 image->dy += image->height + 8;
33510 }
33511 } else if (rotate == FB_ROTATE_CCW) {
33512- for (x = 0; x < num && image->dy >= 0; x++) {
33513+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
33514 info->fbops->fb_imageblit(info, image);
33515 image->dy -= image->height + 8;
33516 }
33517@@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct
33518 int flags = info->flags;
33519 int ret = 0;
33520
33521+ pax_track_stack();
33522+
33523 if (var->activate & FB_ACTIVATE_INV_MODE) {
33524 struct fb_videomode mode1, mode2;
33525
33526@@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *
33527 void __user *argp = (void __user *)arg;
33528 long ret = 0;
33529
33530+ pax_track_stack();
33531+
33532 switch (cmd) {
33533 case FBIOGET_VSCREENINFO:
33534 if (!lock_fb_info(info))
33535@@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *
33536 return -EFAULT;
33537 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
33538 return -EINVAL;
33539- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
33540+ if (con2fb.framebuffer >= FB_MAX)
33541 return -EINVAL;
33542 if (!registered_fb[con2fb.framebuffer])
33543 request_module("fb%d", con2fb.framebuffer);
33544diff -urNp linux-3.0.4/drivers/video/i810/i810_accel.c linux-3.0.4/drivers/video/i810/i810_accel.c
33545--- linux-3.0.4/drivers/video/i810/i810_accel.c 2011-07-21 22:17:23.000000000 -0400
33546+++ linux-3.0.4/drivers/video/i810/i810_accel.c 2011-08-23 21:47:56.000000000 -0400
33547@@ -73,6 +73,7 @@ static inline int wait_for_space(struct
33548 }
33549 }
33550 printk("ringbuffer lockup!!!\n");
33551+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
33552 i810_report_error(mmio);
33553 par->dev_flags |= LOCKUP;
33554 info->pixmap.scan_align = 1;
33555diff -urNp linux-3.0.4/drivers/video/logo/logo_linux_clut224.ppm linux-3.0.4/drivers/video/logo/logo_linux_clut224.ppm
33556--- linux-3.0.4/drivers/video/logo/logo_linux_clut224.ppm 2011-07-21 22:17:23.000000000 -0400
33557+++ linux-3.0.4/drivers/video/logo/logo_linux_clut224.ppm 2011-08-29 23:49:40.000000000 -0400
33558@@ -1,1604 +1,1123 @@
33559 P3
33560-# Standard 224-color Linux logo
33561 80 80
33562 255
33563- 0 0 0 0 0 0 0 0 0 0 0 0
33564- 0 0 0 0 0 0 0 0 0 0 0 0
33565- 0 0 0 0 0 0 0 0 0 0 0 0
33566- 0 0 0 0 0 0 0 0 0 0 0 0
33567- 0 0 0 0 0 0 0 0 0 0 0 0
33568- 0 0 0 0 0 0 0 0 0 0 0 0
33569- 0 0 0 0 0 0 0 0 0 0 0 0
33570- 0 0 0 0 0 0 0 0 0 0 0 0
33571- 0 0 0 0 0 0 0 0 0 0 0 0
33572- 6 6 6 6 6 6 10 10 10 10 10 10
33573- 10 10 10 6 6 6 6 6 6 6 6 6
33574- 0 0 0 0 0 0 0 0 0 0 0 0
33575- 0 0 0 0 0 0 0 0 0 0 0 0
33576- 0 0 0 0 0 0 0 0 0 0 0 0
33577- 0 0 0 0 0 0 0 0 0 0 0 0
33578- 0 0 0 0 0 0 0 0 0 0 0 0
33579- 0 0 0 0 0 0 0 0 0 0 0 0
33580- 0 0 0 0 0 0 0 0 0 0 0 0
33581- 0 0 0 0 0 0 0 0 0 0 0 0
33582- 0 0 0 0 0 0 0 0 0 0 0 0
33583- 0 0 0 0 0 0 0 0 0 0 0 0
33584- 0 0 0 0 0 0 0 0 0 0 0 0
33585- 0 0 0 0 0 0 0 0 0 0 0 0
33586- 0 0 0 0 0 0 0 0 0 0 0 0
33587- 0 0 0 0 0 0 0 0 0 0 0 0
33588- 0 0 0 0 0 0 0 0 0 0 0 0
33589- 0 0 0 0 0 0 0 0 0 0 0 0
33590- 0 0 0 0 0 0 0 0 0 0 0 0
33591- 0 0 0 6 6 6 10 10 10 14 14 14
33592- 22 22 22 26 26 26 30 30 30 34 34 34
33593- 30 30 30 30 30 30 26 26 26 18 18 18
33594- 14 14 14 10 10 10 6 6 6 0 0 0
33595- 0 0 0 0 0 0 0 0 0 0 0 0
33596- 0 0 0 0 0 0 0 0 0 0 0 0
33597- 0 0 0 0 0 0 0 0 0 0 0 0
33598- 0 0 0 0 0 0 0 0 0 0 0 0
33599- 0 0 0 0 0 0 0 0 0 0 0 0
33600- 0 0 0 0 0 0 0 0 0 0 0 0
33601- 0 0 0 0 0 0 0 0 0 0 0 0
33602- 0 0 0 0 0 0 0 0 0 0 0 0
33603- 0 0 0 0 0 0 0 0 0 0 0 0
33604- 0 0 0 0 0 1 0 0 1 0 0 0
33605- 0 0 0 0 0 0 0 0 0 0 0 0
33606- 0 0 0 0 0 0 0 0 0 0 0 0
33607- 0 0 0 0 0 0 0 0 0 0 0 0
33608- 0 0 0 0 0 0 0 0 0 0 0 0
33609- 0 0 0 0 0 0 0 0 0 0 0 0
33610- 0 0 0 0 0 0 0 0 0 0 0 0
33611- 6 6 6 14 14 14 26 26 26 42 42 42
33612- 54 54 54 66 66 66 78 78 78 78 78 78
33613- 78 78 78 74 74 74 66 66 66 54 54 54
33614- 42 42 42 26 26 26 18 18 18 10 10 10
33615- 6 6 6 0 0 0 0 0 0 0 0 0
33616- 0 0 0 0 0 0 0 0 0 0 0 0
33617- 0 0 0 0 0 0 0 0 0 0 0 0
33618- 0 0 0 0 0 0 0 0 0 0 0 0
33619- 0 0 0 0 0 0 0 0 0 0 0 0
33620- 0 0 0 0 0 0 0 0 0 0 0 0
33621- 0 0 0 0 0 0 0 0 0 0 0 0
33622- 0 0 0 0 0 0 0 0 0 0 0 0
33623- 0 0 0 0 0 0 0 0 0 0 0 0
33624- 0 0 1 0 0 0 0 0 0 0 0 0
33625- 0 0 0 0 0 0 0 0 0 0 0 0
33626- 0 0 0 0 0 0 0 0 0 0 0 0
33627- 0 0 0 0 0 0 0 0 0 0 0 0
33628- 0 0 0 0 0 0 0 0 0 0 0 0
33629- 0 0 0 0 0 0 0 0 0 0 0 0
33630- 0 0 0 0 0 0 0 0 0 10 10 10
33631- 22 22 22 42 42 42 66 66 66 86 86 86
33632- 66 66 66 38 38 38 38 38 38 22 22 22
33633- 26 26 26 34 34 34 54 54 54 66 66 66
33634- 86 86 86 70 70 70 46 46 46 26 26 26
33635- 14 14 14 6 6 6 0 0 0 0 0 0
33636- 0 0 0 0 0 0 0 0 0 0 0 0
33637- 0 0 0 0 0 0 0 0 0 0 0 0
33638- 0 0 0 0 0 0 0 0 0 0 0 0
33639- 0 0 0 0 0 0 0 0 0 0 0 0
33640- 0 0 0 0 0 0 0 0 0 0 0 0
33641- 0 0 0 0 0 0 0 0 0 0 0 0
33642- 0 0 0 0 0 0 0 0 0 0 0 0
33643- 0 0 0 0 0 0 0 0 0 0 0 0
33644- 0 0 1 0 0 1 0 0 1 0 0 0
33645- 0 0 0 0 0 0 0 0 0 0 0 0
33646- 0 0 0 0 0 0 0 0 0 0 0 0
33647- 0 0 0 0 0 0 0 0 0 0 0 0
33648- 0 0 0 0 0 0 0 0 0 0 0 0
33649- 0 0 0 0 0 0 0 0 0 0 0 0
33650- 0 0 0 0 0 0 10 10 10 26 26 26
33651- 50 50 50 82 82 82 58 58 58 6 6 6
33652- 2 2 6 2 2 6 2 2 6 2 2 6
33653- 2 2 6 2 2 6 2 2 6 2 2 6
33654- 6 6 6 54 54 54 86 86 86 66 66 66
33655- 38 38 38 18 18 18 6 6 6 0 0 0
33656- 0 0 0 0 0 0 0 0 0 0 0 0
33657- 0 0 0 0 0 0 0 0 0 0 0 0
33658- 0 0 0 0 0 0 0 0 0 0 0 0
33659- 0 0 0 0 0 0 0 0 0 0 0 0
33660- 0 0 0 0 0 0 0 0 0 0 0 0
33661- 0 0 0 0 0 0 0 0 0 0 0 0
33662- 0 0 0 0 0 0 0 0 0 0 0 0
33663- 0 0 0 0 0 0 0 0 0 0 0 0
33664- 0 0 0 0 0 0 0 0 0 0 0 0
33665- 0 0 0 0 0 0 0 0 0 0 0 0
33666- 0 0 0 0 0 0 0 0 0 0 0 0
33667- 0 0 0 0 0 0 0 0 0 0 0 0
33668- 0 0 0 0 0 0 0 0 0 0 0 0
33669- 0 0 0 0 0 0 0 0 0 0 0 0
33670- 0 0 0 6 6 6 22 22 22 50 50 50
33671- 78 78 78 34 34 34 2 2 6 2 2 6
33672- 2 2 6 2 2 6 2 2 6 2 2 6
33673- 2 2 6 2 2 6 2 2 6 2 2 6
33674- 2 2 6 2 2 6 6 6 6 70 70 70
33675- 78 78 78 46 46 46 22 22 22 6 6 6
33676- 0 0 0 0 0 0 0 0 0 0 0 0
33677- 0 0 0 0 0 0 0 0 0 0 0 0
33678- 0 0 0 0 0 0 0 0 0 0 0 0
33679- 0 0 0 0 0 0 0 0 0 0 0 0
33680- 0 0 0 0 0 0 0 0 0 0 0 0
33681- 0 0 0 0 0 0 0 0 0 0 0 0
33682- 0 0 0 0 0 0 0 0 0 0 0 0
33683- 0 0 0 0 0 0 0 0 0 0 0 0
33684- 0 0 1 0 0 1 0 0 1 0 0 0
33685- 0 0 0 0 0 0 0 0 0 0 0 0
33686- 0 0 0 0 0 0 0 0 0 0 0 0
33687- 0 0 0 0 0 0 0 0 0 0 0 0
33688- 0 0 0 0 0 0 0 0 0 0 0 0
33689- 0 0 0 0 0 0 0 0 0 0 0 0
33690- 6 6 6 18 18 18 42 42 42 82 82 82
33691- 26 26 26 2 2 6 2 2 6 2 2 6
33692- 2 2 6 2 2 6 2 2 6 2 2 6
33693- 2 2 6 2 2 6 2 2 6 14 14 14
33694- 46 46 46 34 34 34 6 6 6 2 2 6
33695- 42 42 42 78 78 78 42 42 42 18 18 18
33696- 6 6 6 0 0 0 0 0 0 0 0 0
33697- 0 0 0 0 0 0 0 0 0 0 0 0
33698- 0 0 0 0 0 0 0 0 0 0 0 0
33699- 0 0 0 0 0 0 0 0 0 0 0 0
33700- 0 0 0 0 0 0 0 0 0 0 0 0
33701- 0 0 0 0 0 0 0 0 0 0 0 0
33702- 0 0 0 0 0 0 0 0 0 0 0 0
33703- 0 0 0 0 0 0 0 0 0 0 0 0
33704- 0 0 1 0 0 0 0 0 1 0 0 0
33705- 0 0 0 0 0 0 0 0 0 0 0 0
33706- 0 0 0 0 0 0 0 0 0 0 0 0
33707- 0 0 0 0 0 0 0 0 0 0 0 0
33708- 0 0 0 0 0 0 0 0 0 0 0 0
33709- 0 0 0 0 0 0 0 0 0 0 0 0
33710- 10 10 10 30 30 30 66 66 66 58 58 58
33711- 2 2 6 2 2 6 2 2 6 2 2 6
33712- 2 2 6 2 2 6 2 2 6 2 2 6
33713- 2 2 6 2 2 6 2 2 6 26 26 26
33714- 86 86 86 101 101 101 46 46 46 10 10 10
33715- 2 2 6 58 58 58 70 70 70 34 34 34
33716- 10 10 10 0 0 0 0 0 0 0 0 0
33717- 0 0 0 0 0 0 0 0 0 0 0 0
33718- 0 0 0 0 0 0 0 0 0 0 0 0
33719- 0 0 0 0 0 0 0 0 0 0 0 0
33720- 0 0 0 0 0 0 0 0 0 0 0 0
33721- 0 0 0 0 0 0 0 0 0 0 0 0
33722- 0 0 0 0 0 0 0 0 0 0 0 0
33723- 0 0 0 0 0 0 0 0 0 0 0 0
33724- 0 0 1 0 0 1 0 0 1 0 0 0
33725- 0 0 0 0 0 0 0 0 0 0 0 0
33726- 0 0 0 0 0 0 0 0 0 0 0 0
33727- 0 0 0 0 0 0 0 0 0 0 0 0
33728- 0 0 0 0 0 0 0 0 0 0 0 0
33729- 0 0 0 0 0 0 0 0 0 0 0 0
33730- 14 14 14 42 42 42 86 86 86 10 10 10
33731- 2 2 6 2 2 6 2 2 6 2 2 6
33732- 2 2 6 2 2 6 2 2 6 2 2 6
33733- 2 2 6 2 2 6 2 2 6 30 30 30
33734- 94 94 94 94 94 94 58 58 58 26 26 26
33735- 2 2 6 6 6 6 78 78 78 54 54 54
33736- 22 22 22 6 6 6 0 0 0 0 0 0
33737- 0 0 0 0 0 0 0 0 0 0 0 0
33738- 0 0 0 0 0 0 0 0 0 0 0 0
33739- 0 0 0 0 0 0 0 0 0 0 0 0
33740- 0 0 0 0 0 0 0 0 0 0 0 0
33741- 0 0 0 0 0 0 0 0 0 0 0 0
33742- 0 0 0 0 0 0 0 0 0 0 0 0
33743- 0 0 0 0 0 0 0 0 0 0 0 0
33744- 0 0 0 0 0 0 0 0 0 0 0 0
33745- 0 0 0 0 0 0 0 0 0 0 0 0
33746- 0 0 0 0 0 0 0 0 0 0 0 0
33747- 0 0 0 0 0 0 0 0 0 0 0 0
33748- 0 0 0 0 0 0 0 0 0 0 0 0
33749- 0 0 0 0 0 0 0 0 0 6 6 6
33750- 22 22 22 62 62 62 62 62 62 2 2 6
33751- 2 2 6 2 2 6 2 2 6 2 2 6
33752- 2 2 6 2 2 6 2 2 6 2 2 6
33753- 2 2 6 2 2 6 2 2 6 26 26 26
33754- 54 54 54 38 38 38 18 18 18 10 10 10
33755- 2 2 6 2 2 6 34 34 34 82 82 82
33756- 38 38 38 14 14 14 0 0 0 0 0 0
33757- 0 0 0 0 0 0 0 0 0 0 0 0
33758- 0 0 0 0 0 0 0 0 0 0 0 0
33759- 0 0 0 0 0 0 0 0 0 0 0 0
33760- 0 0 0 0 0 0 0 0 0 0 0 0
33761- 0 0 0 0 0 0 0 0 0 0 0 0
33762- 0 0 0 0 0 0 0 0 0 0 0 0
33763- 0 0 0 0 0 0 0 0 0 0 0 0
33764- 0 0 0 0 0 1 0 0 1 0 0 0
33765- 0 0 0 0 0 0 0 0 0 0 0 0
33766- 0 0 0 0 0 0 0 0 0 0 0 0
33767- 0 0 0 0 0 0 0 0 0 0 0 0
33768- 0 0 0 0 0 0 0 0 0 0 0 0
33769- 0 0 0 0 0 0 0 0 0 6 6 6
33770- 30 30 30 78 78 78 30 30 30 2 2 6
33771- 2 2 6 2 2 6 2 2 6 2 2 6
33772- 2 2 6 2 2 6 2 2 6 2 2 6
33773- 2 2 6 2 2 6 2 2 6 10 10 10
33774- 10 10 10 2 2 6 2 2 6 2 2 6
33775- 2 2 6 2 2 6 2 2 6 78 78 78
33776- 50 50 50 18 18 18 6 6 6 0 0 0
33777- 0 0 0 0 0 0 0 0 0 0 0 0
33778- 0 0 0 0 0 0 0 0 0 0 0 0
33779- 0 0 0 0 0 0 0 0 0 0 0 0
33780- 0 0 0 0 0 0 0 0 0 0 0 0
33781- 0 0 0 0 0 0 0 0 0 0 0 0
33782- 0 0 0 0 0 0 0 0 0 0 0 0
33783- 0 0 0 0 0 0 0 0 0 0 0 0
33784- 0 0 1 0 0 0 0 0 0 0 0 0
33785- 0 0 0 0 0 0 0 0 0 0 0 0
33786- 0 0 0 0 0 0 0 0 0 0 0 0
33787- 0 0 0 0 0 0 0 0 0 0 0 0
33788- 0 0 0 0 0 0 0 0 0 0 0 0
33789- 0 0 0 0 0 0 0 0 0 10 10 10
33790- 38 38 38 86 86 86 14 14 14 2 2 6
33791- 2 2 6 2 2 6 2 2 6 2 2 6
33792- 2 2 6 2 2 6 2 2 6 2 2 6
33793- 2 2 6 2 2 6 2 2 6 2 2 6
33794- 2 2 6 2 2 6 2 2 6 2 2 6
33795- 2 2 6 2 2 6 2 2 6 54 54 54
33796- 66 66 66 26 26 26 6 6 6 0 0 0
33797- 0 0 0 0 0 0 0 0 0 0 0 0
33798- 0 0 0 0 0 0 0 0 0 0 0 0
33799- 0 0 0 0 0 0 0 0 0 0 0 0
33800- 0 0 0 0 0 0 0 0 0 0 0 0
33801- 0 0 0 0 0 0 0 0 0 0 0 0
33802- 0 0 0 0 0 0 0 0 0 0 0 0
33803- 0 0 0 0 0 0 0 0 0 0 0 0
33804- 0 0 0 0 0 1 0 0 1 0 0 0
33805- 0 0 0 0 0 0 0 0 0 0 0 0
33806- 0 0 0 0 0 0 0 0 0 0 0 0
33807- 0 0 0 0 0 0 0 0 0 0 0 0
33808- 0 0 0 0 0 0 0 0 0 0 0 0
33809- 0 0 0 0 0 0 0 0 0 14 14 14
33810- 42 42 42 82 82 82 2 2 6 2 2 6
33811- 2 2 6 6 6 6 10 10 10 2 2 6
33812- 2 2 6 2 2 6 2 2 6 2 2 6
33813- 2 2 6 2 2 6 2 2 6 6 6 6
33814- 14 14 14 10 10 10 2 2 6 2 2 6
33815- 2 2 6 2 2 6 2 2 6 18 18 18
33816- 82 82 82 34 34 34 10 10 10 0 0 0
33817- 0 0 0 0 0 0 0 0 0 0 0 0
33818- 0 0 0 0 0 0 0 0 0 0 0 0
33819- 0 0 0 0 0 0 0 0 0 0 0 0
33820- 0 0 0 0 0 0 0 0 0 0 0 0
33821- 0 0 0 0 0 0 0 0 0 0 0 0
33822- 0 0 0 0 0 0 0 0 0 0 0 0
33823- 0 0 0 0 0 0 0 0 0 0 0 0
33824- 0 0 1 0 0 0 0 0 0 0 0 0
33825- 0 0 0 0 0 0 0 0 0 0 0 0
33826- 0 0 0 0 0 0 0 0 0 0 0 0
33827- 0 0 0 0 0 0 0 0 0 0 0 0
33828- 0 0 0 0 0 0 0 0 0 0 0 0
33829- 0 0 0 0 0 0 0 0 0 14 14 14
33830- 46 46 46 86 86 86 2 2 6 2 2 6
33831- 6 6 6 6 6 6 22 22 22 34 34 34
33832- 6 6 6 2 2 6 2 2 6 2 2 6
33833- 2 2 6 2 2 6 18 18 18 34 34 34
33834- 10 10 10 50 50 50 22 22 22 2 2 6
33835- 2 2 6 2 2 6 2 2 6 10 10 10
33836- 86 86 86 42 42 42 14 14 14 0 0 0
33837- 0 0 0 0 0 0 0 0 0 0 0 0
33838- 0 0 0 0 0 0 0 0 0 0 0 0
33839- 0 0 0 0 0 0 0 0 0 0 0 0
33840- 0 0 0 0 0 0 0 0 0 0 0 0
33841- 0 0 0 0 0 0 0 0 0 0 0 0
33842- 0 0 0 0 0 0 0 0 0 0 0 0
33843- 0 0 0 0 0 0 0 0 0 0 0 0
33844- 0 0 1 0 0 1 0 0 1 0 0 0
33845- 0 0 0 0 0 0 0 0 0 0 0 0
33846- 0 0 0 0 0 0 0 0 0 0 0 0
33847- 0 0 0 0 0 0 0 0 0 0 0 0
33848- 0 0 0 0 0 0 0 0 0 0 0 0
33849- 0 0 0 0 0 0 0 0 0 14 14 14
33850- 46 46 46 86 86 86 2 2 6 2 2 6
33851- 38 38 38 116 116 116 94 94 94 22 22 22
33852- 22 22 22 2 2 6 2 2 6 2 2 6
33853- 14 14 14 86 86 86 138 138 138 162 162 162
33854-154 154 154 38 38 38 26 26 26 6 6 6
33855- 2 2 6 2 2 6 2 2 6 2 2 6
33856- 86 86 86 46 46 46 14 14 14 0 0 0
33857- 0 0 0 0 0 0 0 0 0 0 0 0
33858- 0 0 0 0 0 0 0 0 0 0 0 0
33859- 0 0 0 0 0 0 0 0 0 0 0 0
33860- 0 0 0 0 0 0 0 0 0 0 0 0
33861- 0 0 0 0 0 0 0 0 0 0 0 0
33862- 0 0 0 0 0 0 0 0 0 0 0 0
33863- 0 0 0 0 0 0 0 0 0 0 0 0
33864- 0 0 0 0 0 0 0 0 0 0 0 0
33865- 0 0 0 0 0 0 0 0 0 0 0 0
33866- 0 0 0 0 0 0 0 0 0 0 0 0
33867- 0 0 0 0 0 0 0 0 0 0 0 0
33868- 0 0 0 0 0 0 0 0 0 0 0 0
33869- 0 0 0 0 0 0 0 0 0 14 14 14
33870- 46 46 46 86 86 86 2 2 6 14 14 14
33871-134 134 134 198 198 198 195 195 195 116 116 116
33872- 10 10 10 2 2 6 2 2 6 6 6 6
33873-101 98 89 187 187 187 210 210 210 218 218 218
33874-214 214 214 134 134 134 14 14 14 6 6 6
33875- 2 2 6 2 2 6 2 2 6 2 2 6
33876- 86 86 86 50 50 50 18 18 18 6 6 6
33877- 0 0 0 0 0 0 0 0 0 0 0 0
33878- 0 0 0 0 0 0 0 0 0 0 0 0
33879- 0 0 0 0 0 0 0 0 0 0 0 0
33880- 0 0 0 0 0 0 0 0 0 0 0 0
33881- 0 0 0 0 0 0 0 0 0 0 0 0
33882- 0 0 0 0 0 0 0 0 0 0 0 0
33883- 0 0 0 0 0 0 0 0 1 0 0 0
33884- 0 0 1 0 0 1 0 0 1 0 0 0
33885- 0 0 0 0 0 0 0 0 0 0 0 0
33886- 0 0 0 0 0 0 0 0 0 0 0 0
33887- 0 0 0 0 0 0 0 0 0 0 0 0
33888- 0 0 0 0 0 0 0 0 0 0 0 0
33889- 0 0 0 0 0 0 0 0 0 14 14 14
33890- 46 46 46 86 86 86 2 2 6 54 54 54
33891-218 218 218 195 195 195 226 226 226 246 246 246
33892- 58 58 58 2 2 6 2 2 6 30 30 30
33893-210 210 210 253 253 253 174 174 174 123 123 123
33894-221 221 221 234 234 234 74 74 74 2 2 6
33895- 2 2 6 2 2 6 2 2 6 2 2 6
33896- 70 70 70 58 58 58 22 22 22 6 6 6
33897- 0 0 0 0 0 0 0 0 0 0 0 0
33898- 0 0 0 0 0 0 0 0 0 0 0 0
33899- 0 0 0 0 0 0 0 0 0 0 0 0
33900- 0 0 0 0 0 0 0 0 0 0 0 0
33901- 0 0 0 0 0 0 0 0 0 0 0 0
33902- 0 0 0 0 0 0 0 0 0 0 0 0
33903- 0 0 0 0 0 0 0 0 0 0 0 0
33904- 0 0 0 0 0 0 0 0 0 0 0 0
33905- 0 0 0 0 0 0 0 0 0 0 0 0
33906- 0 0 0 0 0 0 0 0 0 0 0 0
33907- 0 0 0 0 0 0 0 0 0 0 0 0
33908- 0 0 0 0 0 0 0 0 0 0 0 0
33909- 0 0 0 0 0 0 0 0 0 14 14 14
33910- 46 46 46 82 82 82 2 2 6 106 106 106
33911-170 170 170 26 26 26 86 86 86 226 226 226
33912-123 123 123 10 10 10 14 14 14 46 46 46
33913-231 231 231 190 190 190 6 6 6 70 70 70
33914- 90 90 90 238 238 238 158 158 158 2 2 6
33915- 2 2 6 2 2 6 2 2 6 2 2 6
33916- 70 70 70 58 58 58 22 22 22 6 6 6
33917- 0 0 0 0 0 0 0 0 0 0 0 0
33918- 0 0 0 0 0 0 0 0 0 0 0 0
33919- 0 0 0 0 0 0 0 0 0 0 0 0
33920- 0 0 0 0 0 0 0 0 0 0 0 0
33921- 0 0 0 0 0 0 0 0 0 0 0 0
33922- 0 0 0 0 0 0 0 0 0 0 0 0
33923- 0 0 0 0 0 0 0 0 1 0 0 0
33924- 0 0 1 0 0 1 0 0 1 0 0 0
33925- 0 0 0 0 0 0 0 0 0 0 0 0
33926- 0 0 0 0 0 0 0 0 0 0 0 0
33927- 0 0 0 0 0 0 0 0 0 0 0 0
33928- 0 0 0 0 0 0 0 0 0 0 0 0
33929- 0 0 0 0 0 0 0 0 0 14 14 14
33930- 42 42 42 86 86 86 6 6 6 116 116 116
33931-106 106 106 6 6 6 70 70 70 149 149 149
33932-128 128 128 18 18 18 38 38 38 54 54 54
33933-221 221 221 106 106 106 2 2 6 14 14 14
33934- 46 46 46 190 190 190 198 198 198 2 2 6
33935- 2 2 6 2 2 6 2 2 6 2 2 6
33936- 74 74 74 62 62 62 22 22 22 6 6 6
33937- 0 0 0 0 0 0 0 0 0 0 0 0
33938- 0 0 0 0 0 0 0 0 0 0 0 0
33939- 0 0 0 0 0 0 0 0 0 0 0 0
33940- 0 0 0 0 0 0 0 0 0 0 0 0
33941- 0 0 0 0 0 0 0 0 0 0 0 0
33942- 0 0 0 0 0 0 0 0 0 0 0 0
33943- 0 0 0 0 0 0 0 0 1 0 0 0
33944- 0 0 1 0 0 0 0 0 1 0 0 0
33945- 0 0 0 0 0 0 0 0 0 0 0 0
33946- 0 0 0 0 0 0 0 0 0 0 0 0
33947- 0 0 0 0 0 0 0 0 0 0 0 0
33948- 0 0 0 0 0 0 0 0 0 0 0 0
33949- 0 0 0 0 0 0 0 0 0 14 14 14
33950- 42 42 42 94 94 94 14 14 14 101 101 101
33951-128 128 128 2 2 6 18 18 18 116 116 116
33952-118 98 46 121 92 8 121 92 8 98 78 10
33953-162 162 162 106 106 106 2 2 6 2 2 6
33954- 2 2 6 195 195 195 195 195 195 6 6 6
33955- 2 2 6 2 2 6 2 2 6 2 2 6
33956- 74 74 74 62 62 62 22 22 22 6 6 6
33957- 0 0 0 0 0 0 0 0 0 0 0 0
33958- 0 0 0 0 0 0 0 0 0 0 0 0
33959- 0 0 0 0 0 0 0 0 0 0 0 0
33960- 0 0 0 0 0 0 0 0 0 0 0 0
33961- 0 0 0 0 0 0 0 0 0 0 0 0
33962- 0 0 0 0 0 0 0 0 0 0 0 0
33963- 0 0 0 0 0 0 0 0 1 0 0 1
33964- 0 0 1 0 0 0 0 0 1 0 0 0
33965- 0 0 0 0 0 0 0 0 0 0 0 0
33966- 0 0 0 0 0 0 0 0 0 0 0 0
33967- 0 0 0 0 0 0 0 0 0 0 0 0
33968- 0 0 0 0 0 0 0 0 0 0 0 0
33969- 0 0 0 0 0 0 0 0 0 10 10 10
33970- 38 38 38 90 90 90 14 14 14 58 58 58
33971-210 210 210 26 26 26 54 38 6 154 114 10
33972-226 170 11 236 186 11 225 175 15 184 144 12
33973-215 174 15 175 146 61 37 26 9 2 2 6
33974- 70 70 70 246 246 246 138 138 138 2 2 6
33975- 2 2 6 2 2 6 2 2 6 2 2 6
33976- 70 70 70 66 66 66 26 26 26 6 6 6
33977- 0 0 0 0 0 0 0 0 0 0 0 0
33978- 0 0 0 0 0 0 0 0 0 0 0 0
33979- 0 0 0 0 0 0 0 0 0 0 0 0
33980- 0 0 0 0 0 0 0 0 0 0 0 0
33981- 0 0 0 0 0 0 0 0 0 0 0 0
33982- 0 0 0 0 0 0 0 0 0 0 0 0
33983- 0 0 0 0 0 0 0 0 0 0 0 0
33984- 0 0 0 0 0 0 0 0 0 0 0 0
33985- 0 0 0 0 0 0 0 0 0 0 0 0
33986- 0 0 0 0 0 0 0 0 0 0 0 0
33987- 0 0 0 0 0 0 0 0 0 0 0 0
33988- 0 0 0 0 0 0 0 0 0 0 0 0
33989- 0 0 0 0 0 0 0 0 0 10 10 10
33990- 38 38 38 86 86 86 14 14 14 10 10 10
33991-195 195 195 188 164 115 192 133 9 225 175 15
33992-239 182 13 234 190 10 232 195 16 232 200 30
33993-245 207 45 241 208 19 232 195 16 184 144 12
33994-218 194 134 211 206 186 42 42 42 2 2 6
33995- 2 2 6 2 2 6 2 2 6 2 2 6
33996- 50 50 50 74 74 74 30 30 30 6 6 6
33997- 0 0 0 0 0 0 0 0 0 0 0 0
33998- 0 0 0 0 0 0 0 0 0 0 0 0
33999- 0 0 0 0 0 0 0 0 0 0 0 0
34000- 0 0 0 0 0 0 0 0 0 0 0 0
34001- 0 0 0 0 0 0 0 0 0 0 0 0
34002- 0 0 0 0 0 0 0 0 0 0 0 0
34003- 0 0 0 0 0 0 0 0 0 0 0 0
34004- 0 0 0 0 0 0 0 0 0 0 0 0
34005- 0 0 0 0 0 0 0 0 0 0 0 0
34006- 0 0 0 0 0 0 0 0 0 0 0 0
34007- 0 0 0 0 0 0 0 0 0 0 0 0
34008- 0 0 0 0 0 0 0 0 0 0 0 0
34009- 0 0 0 0 0 0 0 0 0 10 10 10
34010- 34 34 34 86 86 86 14 14 14 2 2 6
34011-121 87 25 192 133 9 219 162 10 239 182 13
34012-236 186 11 232 195 16 241 208 19 244 214 54
34013-246 218 60 246 218 38 246 215 20 241 208 19
34014-241 208 19 226 184 13 121 87 25 2 2 6
34015- 2 2 6 2 2 6 2 2 6 2 2 6
34016- 50 50 50 82 82 82 34 34 34 10 10 10
34017- 0 0 0 0 0 0 0 0 0 0 0 0
34018- 0 0 0 0 0 0 0 0 0 0 0 0
34019- 0 0 0 0 0 0 0 0 0 0 0 0
34020- 0 0 0 0 0 0 0 0 0 0 0 0
34021- 0 0 0 0 0 0 0 0 0 0 0 0
34022- 0 0 0 0 0 0 0 0 0 0 0 0
34023- 0 0 0 0 0 0 0 0 0 0 0 0
34024- 0 0 0 0 0 0 0 0 0 0 0 0
34025- 0 0 0 0 0 0 0 0 0 0 0 0
34026- 0 0 0 0 0 0 0 0 0 0 0 0
34027- 0 0 0 0 0 0 0 0 0 0 0 0
34028- 0 0 0 0 0 0 0 0 0 0 0 0
34029- 0 0 0 0 0 0 0 0 0 10 10 10
34030- 34 34 34 82 82 82 30 30 30 61 42 6
34031-180 123 7 206 145 10 230 174 11 239 182 13
34032-234 190 10 238 202 15 241 208 19 246 218 74
34033-246 218 38 246 215 20 246 215 20 246 215 20
34034-226 184 13 215 174 15 184 144 12 6 6 6
34035- 2 2 6 2 2 6 2 2 6 2 2 6
34036- 26 26 26 94 94 94 42 42 42 14 14 14
34037- 0 0 0 0 0 0 0 0 0 0 0 0
34038- 0 0 0 0 0 0 0 0 0 0 0 0
34039- 0 0 0 0 0 0 0 0 0 0 0 0
34040- 0 0 0 0 0 0 0 0 0 0 0 0
34041- 0 0 0 0 0 0 0 0 0 0 0 0
34042- 0 0 0 0 0 0 0 0 0 0 0 0
34043- 0 0 0 0 0 0 0 0 0 0 0 0
34044- 0 0 0 0 0 0 0 0 0 0 0 0
34045- 0 0 0 0 0 0 0 0 0 0 0 0
34046- 0 0 0 0 0 0 0 0 0 0 0 0
34047- 0 0 0 0 0 0 0 0 0 0 0 0
34048- 0 0 0 0 0 0 0 0 0 0 0 0
34049- 0 0 0 0 0 0 0 0 0 10 10 10
34050- 30 30 30 78 78 78 50 50 50 104 69 6
34051-192 133 9 216 158 10 236 178 12 236 186 11
34052-232 195 16 241 208 19 244 214 54 245 215 43
34053-246 215 20 246 215 20 241 208 19 198 155 10
34054-200 144 11 216 158 10 156 118 10 2 2 6
34055- 2 2 6 2 2 6 2 2 6 2 2 6
34056- 6 6 6 90 90 90 54 54 54 18 18 18
34057- 6 6 6 0 0 0 0 0 0 0 0 0
34058- 0 0 0 0 0 0 0 0 0 0 0 0
34059- 0 0 0 0 0 0 0 0 0 0 0 0
34060- 0 0 0 0 0 0 0 0 0 0 0 0
34061- 0 0 0 0 0 0 0 0 0 0 0 0
34062- 0 0 0 0 0 0 0 0 0 0 0 0
34063- 0 0 0 0 0 0 0 0 0 0 0 0
34064- 0 0 0 0 0 0 0 0 0 0 0 0
34065- 0 0 0 0 0 0 0 0 0 0 0 0
34066- 0 0 0 0 0 0 0 0 0 0 0 0
34067- 0 0 0 0 0 0 0 0 0 0 0 0
34068- 0 0 0 0 0 0 0 0 0 0 0 0
34069- 0 0 0 0 0 0 0 0 0 10 10 10
34070- 30 30 30 78 78 78 46 46 46 22 22 22
34071-137 92 6 210 162 10 239 182 13 238 190 10
34072-238 202 15 241 208 19 246 215 20 246 215 20
34073-241 208 19 203 166 17 185 133 11 210 150 10
34074-216 158 10 210 150 10 102 78 10 2 2 6
34075- 6 6 6 54 54 54 14 14 14 2 2 6
34076- 2 2 6 62 62 62 74 74 74 30 30 30
34077- 10 10 10 0 0 0 0 0 0 0 0 0
34078- 0 0 0 0 0 0 0 0 0 0 0 0
34079- 0 0 0 0 0 0 0 0 0 0 0 0
34080- 0 0 0 0 0 0 0 0 0 0 0 0
34081- 0 0 0 0 0 0 0 0 0 0 0 0
34082- 0 0 0 0 0 0 0 0 0 0 0 0
34083- 0 0 0 0 0 0 0 0 0 0 0 0
34084- 0 0 0 0 0 0 0 0 0 0 0 0
34085- 0 0 0 0 0 0 0 0 0 0 0 0
34086- 0 0 0 0 0 0 0 0 0 0 0 0
34087- 0 0 0 0 0 0 0 0 0 0 0 0
34088- 0 0 0 0 0 0 0 0 0 0 0 0
34089- 0 0 0 0 0 0 0 0 0 10 10 10
34090- 34 34 34 78 78 78 50 50 50 6 6 6
34091- 94 70 30 139 102 15 190 146 13 226 184 13
34092-232 200 30 232 195 16 215 174 15 190 146 13
34093-168 122 10 192 133 9 210 150 10 213 154 11
34094-202 150 34 182 157 106 101 98 89 2 2 6
34095- 2 2 6 78 78 78 116 116 116 58 58 58
34096- 2 2 6 22 22 22 90 90 90 46 46 46
34097- 18 18 18 6 6 6 0 0 0 0 0 0
34098- 0 0 0 0 0 0 0 0 0 0 0 0
34099- 0 0 0 0 0 0 0 0 0 0 0 0
34100- 0 0 0 0 0 0 0 0 0 0 0 0
34101- 0 0 0 0 0 0 0 0 0 0 0 0
34102- 0 0 0 0 0 0 0 0 0 0 0 0
34103- 0 0 0 0 0 0 0 0 0 0 0 0
34104- 0 0 0 0 0 0 0 0 0 0 0 0
34105- 0 0 0 0 0 0 0 0 0 0 0 0
34106- 0 0 0 0 0 0 0 0 0 0 0 0
34107- 0 0 0 0 0 0 0 0 0 0 0 0
34108- 0 0 0 0 0 0 0 0 0 0 0 0
34109- 0 0 0 0 0 0 0 0 0 10 10 10
34110- 38 38 38 86 86 86 50 50 50 6 6 6
34111-128 128 128 174 154 114 156 107 11 168 122 10
34112-198 155 10 184 144 12 197 138 11 200 144 11
34113-206 145 10 206 145 10 197 138 11 188 164 115
34114-195 195 195 198 198 198 174 174 174 14 14 14
34115- 2 2 6 22 22 22 116 116 116 116 116 116
34116- 22 22 22 2 2 6 74 74 74 70 70 70
34117- 30 30 30 10 10 10 0 0 0 0 0 0
34118- 0 0 0 0 0 0 0 0 0 0 0 0
34119- 0 0 0 0 0 0 0 0 0 0 0 0
34120- 0 0 0 0 0 0 0 0 0 0 0 0
34121- 0 0 0 0 0 0 0 0 0 0 0 0
34122- 0 0 0 0 0 0 0 0 0 0 0 0
34123- 0 0 0 0 0 0 0 0 0 0 0 0
34124- 0 0 0 0 0 0 0 0 0 0 0 0
34125- 0 0 0 0 0 0 0 0 0 0 0 0
34126- 0 0 0 0 0 0 0 0 0 0 0 0
34127- 0 0 0 0 0 0 0 0 0 0 0 0
34128- 0 0 0 0 0 0 0 0 0 0 0 0
34129- 0 0 0 0 0 0 6 6 6 18 18 18
34130- 50 50 50 101 101 101 26 26 26 10 10 10
34131-138 138 138 190 190 190 174 154 114 156 107 11
34132-197 138 11 200 144 11 197 138 11 192 133 9
34133-180 123 7 190 142 34 190 178 144 187 187 187
34134-202 202 202 221 221 221 214 214 214 66 66 66
34135- 2 2 6 2 2 6 50 50 50 62 62 62
34136- 6 6 6 2 2 6 10 10 10 90 90 90
34137- 50 50 50 18 18 18 6 6 6 0 0 0
34138- 0 0 0 0 0 0 0 0 0 0 0 0
34139- 0 0 0 0 0 0 0 0 0 0 0 0
34140- 0 0 0 0 0 0 0 0 0 0 0 0
34141- 0 0 0 0 0 0 0 0 0 0 0 0
34142- 0 0 0 0 0 0 0 0 0 0 0 0
34143- 0 0 0 0 0 0 0 0 0 0 0 0
34144- 0 0 0 0 0 0 0 0 0 0 0 0
34145- 0 0 0 0 0 0 0 0 0 0 0 0
34146- 0 0 0 0 0 0 0 0 0 0 0 0
34147- 0 0 0 0 0 0 0 0 0 0 0 0
34148- 0 0 0 0 0 0 0 0 0 0 0 0
34149- 0 0 0 0 0 0 10 10 10 34 34 34
34150- 74 74 74 74 74 74 2 2 6 6 6 6
34151-144 144 144 198 198 198 190 190 190 178 166 146
34152-154 121 60 156 107 11 156 107 11 168 124 44
34153-174 154 114 187 187 187 190 190 190 210 210 210
34154-246 246 246 253 253 253 253 253 253 182 182 182
34155- 6 6 6 2 2 6 2 2 6 2 2 6
34156- 2 2 6 2 2 6 2 2 6 62 62 62
34157- 74 74 74 34 34 34 14 14 14 0 0 0
34158- 0 0 0 0 0 0 0 0 0 0 0 0
34159- 0 0 0 0 0 0 0 0 0 0 0 0
34160- 0 0 0 0 0 0 0 0 0 0 0 0
34161- 0 0 0 0 0 0 0 0 0 0 0 0
34162- 0 0 0 0 0 0 0 0 0 0 0 0
34163- 0 0 0 0 0 0 0 0 0 0 0 0
34164- 0 0 0 0 0 0 0 0 0 0 0 0
34165- 0 0 0 0 0 0 0 0 0 0 0 0
34166- 0 0 0 0 0 0 0 0 0 0 0 0
34167- 0 0 0 0 0 0 0 0 0 0 0 0
34168- 0 0 0 0 0 0 0 0 0 0 0 0
34169- 0 0 0 10 10 10 22 22 22 54 54 54
34170- 94 94 94 18 18 18 2 2 6 46 46 46
34171-234 234 234 221 221 221 190 190 190 190 190 190
34172-190 190 190 187 187 187 187 187 187 190 190 190
34173-190 190 190 195 195 195 214 214 214 242 242 242
34174-253 253 253 253 253 253 253 253 253 253 253 253
34175- 82 82 82 2 2 6 2 2 6 2 2 6
34176- 2 2 6 2 2 6 2 2 6 14 14 14
34177- 86 86 86 54 54 54 22 22 22 6 6 6
34178- 0 0 0 0 0 0 0 0 0 0 0 0
34179- 0 0 0 0 0 0 0 0 0 0 0 0
34180- 0 0 0 0 0 0 0 0 0 0 0 0
34181- 0 0 0 0 0 0 0 0 0 0 0 0
34182- 0 0 0 0 0 0 0 0 0 0 0 0
34183- 0 0 0 0 0 0 0 0 0 0 0 0
34184- 0 0 0 0 0 0 0 0 0 0 0 0
34185- 0 0 0 0 0 0 0 0 0 0 0 0
34186- 0 0 0 0 0 0 0 0 0 0 0 0
34187- 0 0 0 0 0 0 0 0 0 0 0 0
34188- 0 0 0 0 0 0 0 0 0 0 0 0
34189- 6 6 6 18 18 18 46 46 46 90 90 90
34190- 46 46 46 18 18 18 6 6 6 182 182 182
34191-253 253 253 246 246 246 206 206 206 190 190 190
34192-190 190 190 190 190 190 190 190 190 190 190 190
34193-206 206 206 231 231 231 250 250 250 253 253 253
34194-253 253 253 253 253 253 253 253 253 253 253 253
34195-202 202 202 14 14 14 2 2 6 2 2 6
34196- 2 2 6 2 2 6 2 2 6 2 2 6
34197- 42 42 42 86 86 86 42 42 42 18 18 18
34198- 6 6 6 0 0 0 0 0 0 0 0 0
34199- 0 0 0 0 0 0 0 0 0 0 0 0
34200- 0 0 0 0 0 0 0 0 0 0 0 0
34201- 0 0 0 0 0 0 0 0 0 0 0 0
34202- 0 0 0 0 0 0 0 0 0 0 0 0
34203- 0 0 0 0 0 0 0 0 0 0 0 0
34204- 0 0 0 0 0 0 0 0 0 0 0 0
34205- 0 0 0 0 0 0 0 0 0 0 0 0
34206- 0 0 0 0 0 0 0 0 0 0 0 0
34207- 0 0 0 0 0 0 0 0 0 0 0 0
34208- 0 0 0 0 0 0 0 0 0 6 6 6
34209- 14 14 14 38 38 38 74 74 74 66 66 66
34210- 2 2 6 6 6 6 90 90 90 250 250 250
34211-253 253 253 253 253 253 238 238 238 198 198 198
34212-190 190 190 190 190 190 195 195 195 221 221 221
34213-246 246 246 253 253 253 253 253 253 253 253 253
34214-253 253 253 253 253 253 253 253 253 253 253 253
34215-253 253 253 82 82 82 2 2 6 2 2 6
34216- 2 2 6 2 2 6 2 2 6 2 2 6
34217- 2 2 6 78 78 78 70 70 70 34 34 34
34218- 14 14 14 6 6 6 0 0 0 0 0 0
34219- 0 0 0 0 0 0 0 0 0 0 0 0
34220- 0 0 0 0 0 0 0 0 0 0 0 0
34221- 0 0 0 0 0 0 0 0 0 0 0 0
34222- 0 0 0 0 0 0 0 0 0 0 0 0
34223- 0 0 0 0 0 0 0 0 0 0 0 0
34224- 0 0 0 0 0 0 0 0 0 0 0 0
34225- 0 0 0 0 0 0 0 0 0 0 0 0
34226- 0 0 0 0 0 0 0 0 0 0 0 0
34227- 0 0 0 0 0 0 0 0 0 0 0 0
34228- 0 0 0 0 0 0 0 0 0 14 14 14
34229- 34 34 34 66 66 66 78 78 78 6 6 6
34230- 2 2 6 18 18 18 218 218 218 253 253 253
34231-253 253 253 253 253 253 253 253 253 246 246 246
34232-226 226 226 231 231 231 246 246 246 253 253 253
34233-253 253 253 253 253 253 253 253 253 253 253 253
34234-253 253 253 253 253 253 253 253 253 253 253 253
34235-253 253 253 178 178 178 2 2 6 2 2 6
34236- 2 2 6 2 2 6 2 2 6 2 2 6
34237- 2 2 6 18 18 18 90 90 90 62 62 62
34238- 30 30 30 10 10 10 0 0 0 0 0 0
34239- 0 0 0 0 0 0 0 0 0 0 0 0
34240- 0 0 0 0 0 0 0 0 0 0 0 0
34241- 0 0 0 0 0 0 0 0 0 0 0 0
34242- 0 0 0 0 0 0 0 0 0 0 0 0
34243- 0 0 0 0 0 0 0 0 0 0 0 0
34244- 0 0 0 0 0 0 0 0 0 0 0 0
34245- 0 0 0 0 0 0 0 0 0 0 0 0
34246- 0 0 0 0 0 0 0 0 0 0 0 0
34247- 0 0 0 0 0 0 0 0 0 0 0 0
34248- 0 0 0 0 0 0 10 10 10 26 26 26
34249- 58 58 58 90 90 90 18 18 18 2 2 6
34250- 2 2 6 110 110 110 253 253 253 253 253 253
34251-253 253 253 253 253 253 253 253 253 253 253 253
34252-250 250 250 253 253 253 253 253 253 253 253 253
34253-253 253 253 253 253 253 253 253 253 253 253 253
34254-253 253 253 253 253 253 253 253 253 253 253 253
34255-253 253 253 231 231 231 18 18 18 2 2 6
34256- 2 2 6 2 2 6 2 2 6 2 2 6
34257- 2 2 6 2 2 6 18 18 18 94 94 94
34258- 54 54 54 26 26 26 10 10 10 0 0 0
34259- 0 0 0 0 0 0 0 0 0 0 0 0
34260- 0 0 0 0 0 0 0 0 0 0 0 0
34261- 0 0 0 0 0 0 0 0 0 0 0 0
34262- 0 0 0 0 0 0 0 0 0 0 0 0
34263- 0 0 0 0 0 0 0 0 0 0 0 0
34264- 0 0 0 0 0 0 0 0 0 0 0 0
34265- 0 0 0 0 0 0 0 0 0 0 0 0
34266- 0 0 0 0 0 0 0 0 0 0 0 0
34267- 0 0 0 0 0 0 0 0 0 0 0 0
34268- 0 0 0 6 6 6 22 22 22 50 50 50
34269- 90 90 90 26 26 26 2 2 6 2 2 6
34270- 14 14 14 195 195 195 250 250 250 253 253 253
34271-253 253 253 253 253 253 253 253 253 253 253 253
34272-253 253 253 253 253 253 253 253 253 253 253 253
34273-253 253 253 253 253 253 253 253 253 253 253 253
34274-253 253 253 253 253 253 253 253 253 253 253 253
34275-250 250 250 242 242 242 54 54 54 2 2 6
34276- 2 2 6 2 2 6 2 2 6 2 2 6
34277- 2 2 6 2 2 6 2 2 6 38 38 38
34278- 86 86 86 50 50 50 22 22 22 6 6 6
34279- 0 0 0 0 0 0 0 0 0 0 0 0
34280- 0 0 0 0 0 0 0 0 0 0 0 0
34281- 0 0 0 0 0 0 0 0 0 0 0 0
34282- 0 0 0 0 0 0 0 0 0 0 0 0
34283- 0 0 0 0 0 0 0 0 0 0 0 0
34284- 0 0 0 0 0 0 0 0 0 0 0 0
34285- 0 0 0 0 0 0 0 0 0 0 0 0
34286- 0 0 0 0 0 0 0 0 0 0 0 0
34287- 0 0 0 0 0 0 0 0 0 0 0 0
34288- 6 6 6 14 14 14 38 38 38 82 82 82
34289- 34 34 34 2 2 6 2 2 6 2 2 6
34290- 42 42 42 195 195 195 246 246 246 253 253 253
34291-253 253 253 253 253 253 253 253 253 250 250 250
34292-242 242 242 242 242 242 250 250 250 253 253 253
34293-253 253 253 253 253 253 253 253 253 253 253 253
34294-253 253 253 250 250 250 246 246 246 238 238 238
34295-226 226 226 231 231 231 101 101 101 6 6 6
34296- 2 2 6 2 2 6 2 2 6 2 2 6
34297- 2 2 6 2 2 6 2 2 6 2 2 6
34298- 38 38 38 82 82 82 42 42 42 14 14 14
34299- 6 6 6 0 0 0 0 0 0 0 0 0
34300- 0 0 0 0 0 0 0 0 0 0 0 0
34301- 0 0 0 0 0 0 0 0 0 0 0 0
34302- 0 0 0 0 0 0 0 0 0 0 0 0
34303- 0 0 0 0 0 0 0 0 0 0 0 0
34304- 0 0 0 0 0 0 0 0 0 0 0 0
34305- 0 0 0 0 0 0 0 0 0 0 0 0
34306- 0 0 0 0 0 0 0 0 0 0 0 0
34307- 0 0 0 0 0 0 0 0 0 0 0 0
34308- 10 10 10 26 26 26 62 62 62 66 66 66
34309- 2 2 6 2 2 6 2 2 6 6 6 6
34310- 70 70 70 170 170 170 206 206 206 234 234 234
34311-246 246 246 250 250 250 250 250 250 238 238 238
34312-226 226 226 231 231 231 238 238 238 250 250 250
34313-250 250 250 250 250 250 246 246 246 231 231 231
34314-214 214 214 206 206 206 202 202 202 202 202 202
34315-198 198 198 202 202 202 182 182 182 18 18 18
34316- 2 2 6 2 2 6 2 2 6 2 2 6
34317- 2 2 6 2 2 6 2 2 6 2 2 6
34318- 2 2 6 62 62 62 66 66 66 30 30 30
34319- 10 10 10 0 0 0 0 0 0 0 0 0
34320- 0 0 0 0 0 0 0 0 0 0 0 0
34321- 0 0 0 0 0 0 0 0 0 0 0 0
34322- 0 0 0 0 0 0 0 0 0 0 0 0
34323- 0 0 0 0 0 0 0 0 0 0 0 0
34324- 0 0 0 0 0 0 0 0 0 0 0 0
34325- 0 0 0 0 0 0 0 0 0 0 0 0
34326- 0 0 0 0 0 0 0 0 0 0 0 0
34327- 0 0 0 0 0 0 0 0 0 0 0 0
34328- 14 14 14 42 42 42 82 82 82 18 18 18
34329- 2 2 6 2 2 6 2 2 6 10 10 10
34330- 94 94 94 182 182 182 218 218 218 242 242 242
34331-250 250 250 253 253 253 253 253 253 250 250 250
34332-234 234 234 253 253 253 253 253 253 253 253 253
34333-253 253 253 253 253 253 253 253 253 246 246 246
34334-238 238 238 226 226 226 210 210 210 202 202 202
34335-195 195 195 195 195 195 210 210 210 158 158 158
34336- 6 6 6 14 14 14 50 50 50 14 14 14
34337- 2 2 6 2 2 6 2 2 6 2 2 6
34338- 2 2 6 6 6 6 86 86 86 46 46 46
34339- 18 18 18 6 6 6 0 0 0 0 0 0
34340- 0 0 0 0 0 0 0 0 0 0 0 0
34341- 0 0 0 0 0 0 0 0 0 0 0 0
34342- 0 0 0 0 0 0 0 0 0 0 0 0
34343- 0 0 0 0 0 0 0 0 0 0 0 0
34344- 0 0 0 0 0 0 0 0 0 0 0 0
34345- 0 0 0 0 0 0 0 0 0 0 0 0
34346- 0 0 0 0 0 0 0 0 0 0 0 0
34347- 0 0 0 0 0 0 0 0 0 6 6 6
34348- 22 22 22 54 54 54 70 70 70 2 2 6
34349- 2 2 6 10 10 10 2 2 6 22 22 22
34350-166 166 166 231 231 231 250 250 250 253 253 253
34351-253 253 253 253 253 253 253 253 253 250 250 250
34352-242 242 242 253 253 253 253 253 253 253 253 253
34353-253 253 253 253 253 253 253 253 253 253 253 253
34354-253 253 253 253 253 253 253 253 253 246 246 246
34355-231 231 231 206 206 206 198 198 198 226 226 226
34356- 94 94 94 2 2 6 6 6 6 38 38 38
34357- 30 30 30 2 2 6 2 2 6 2 2 6
34358- 2 2 6 2 2 6 62 62 62 66 66 66
34359- 26 26 26 10 10 10 0 0 0 0 0 0
34360- 0 0 0 0 0 0 0 0 0 0 0 0
34361- 0 0 0 0 0 0 0 0 0 0 0 0
34362- 0 0 0 0 0 0 0 0 0 0 0 0
34363- 0 0 0 0 0 0 0 0 0 0 0 0
34364- 0 0 0 0 0 0 0 0 0 0 0 0
34365- 0 0 0 0 0 0 0 0 0 0 0 0
34366- 0 0 0 0 0 0 0 0 0 0 0 0
34367- 0 0 0 0 0 0 0 0 0 10 10 10
34368- 30 30 30 74 74 74 50 50 50 2 2 6
34369- 26 26 26 26 26 26 2 2 6 106 106 106
34370-238 238 238 253 253 253 253 253 253 253 253 253
34371-253 253 253 253 253 253 253 253 253 253 253 253
34372-253 253 253 253 253 253 253 253 253 253 253 253
34373-253 253 253 253 253 253 253 253 253 253 253 253
34374-253 253 253 253 253 253 253 253 253 253 253 253
34375-253 253 253 246 246 246 218 218 218 202 202 202
34376-210 210 210 14 14 14 2 2 6 2 2 6
34377- 30 30 30 22 22 22 2 2 6 2 2 6
34378- 2 2 6 2 2 6 18 18 18 86 86 86
34379- 42 42 42 14 14 14 0 0 0 0 0 0
34380- 0 0 0 0 0 0 0 0 0 0 0 0
34381- 0 0 0 0 0 0 0 0 0 0 0 0
34382- 0 0 0 0 0 0 0 0 0 0 0 0
34383- 0 0 0 0 0 0 0 0 0 0 0 0
34384- 0 0 0 0 0 0 0 0 0 0 0 0
34385- 0 0 0 0 0 0 0 0 0 0 0 0
34386- 0 0 0 0 0 0 0 0 0 0 0 0
34387- 0 0 0 0 0 0 0 0 0 14 14 14
34388- 42 42 42 90 90 90 22 22 22 2 2 6
34389- 42 42 42 2 2 6 18 18 18 218 218 218
34390-253 253 253 253 253 253 253 253 253 253 253 253
34391-253 253 253 253 253 253 253 253 253 253 253 253
34392-253 253 253 253 253 253 253 253 253 253 253 253
34393-253 253 253 253 253 253 253 253 253 253 253 253
34394-253 253 253 253 253 253 253 253 253 253 253 253
34395-253 253 253 253 253 253 250 250 250 221 221 221
34396-218 218 218 101 101 101 2 2 6 14 14 14
34397- 18 18 18 38 38 38 10 10 10 2 2 6
34398- 2 2 6 2 2 6 2 2 6 78 78 78
34399- 58 58 58 22 22 22 6 6 6 0 0 0
34400- 0 0 0 0 0 0 0 0 0 0 0 0
34401- 0 0 0 0 0 0 0 0 0 0 0 0
34402- 0 0 0 0 0 0 0 0 0 0 0 0
34403- 0 0 0 0 0 0 0 0 0 0 0 0
34404- 0 0 0 0 0 0 0 0 0 0 0 0
34405- 0 0 0 0 0 0 0 0 0 0 0 0
34406- 0 0 0 0 0 0 0 0 0 0 0 0
34407- 0 0 0 0 0 0 6 6 6 18 18 18
34408- 54 54 54 82 82 82 2 2 6 26 26 26
34409- 22 22 22 2 2 6 123 123 123 253 253 253
34410-253 253 253 253 253 253 253 253 253 253 253 253
34411-253 253 253 253 253 253 253 253 253 253 253 253
34412-253 253 253 253 253 253 253 253 253 253 253 253
34413-253 253 253 253 253 253 253 253 253 253 253 253
34414-253 253 253 253 253 253 253 253 253 253 253 253
34415-253 253 253 253 253 253 253 253 253 250 250 250
34416-238 238 238 198 198 198 6 6 6 38 38 38
34417- 58 58 58 26 26 26 38 38 38 2 2 6
34418- 2 2 6 2 2 6 2 2 6 46 46 46
34419- 78 78 78 30 30 30 10 10 10 0 0 0
34420- 0 0 0 0 0 0 0 0 0 0 0 0
34421- 0 0 0 0 0 0 0 0 0 0 0 0
34422- 0 0 0 0 0 0 0 0 0 0 0 0
34423- 0 0 0 0 0 0 0 0 0 0 0 0
34424- 0 0 0 0 0 0 0 0 0 0 0 0
34425- 0 0 0 0 0 0 0 0 0 0 0 0
34426- 0 0 0 0 0 0 0 0 0 0 0 0
34427- 0 0 0 0 0 0 10 10 10 30 30 30
34428- 74 74 74 58 58 58 2 2 6 42 42 42
34429- 2 2 6 22 22 22 231 231 231 253 253 253
34430-253 253 253 253 253 253 253 253 253 253 253 253
34431-253 253 253 253 253 253 253 253 253 250 250 250
34432-253 253 253 253 253 253 253 253 253 253 253 253
34433-253 253 253 253 253 253 253 253 253 253 253 253
34434-253 253 253 253 253 253 253 253 253 253 253 253
34435-253 253 253 253 253 253 253 253 253 253 253 253
34436-253 253 253 246 246 246 46 46 46 38 38 38
34437- 42 42 42 14 14 14 38 38 38 14 14 14
34438- 2 2 6 2 2 6 2 2 6 6 6 6
34439- 86 86 86 46 46 46 14 14 14 0 0 0
34440- 0 0 0 0 0 0 0 0 0 0 0 0
34441- 0 0 0 0 0 0 0 0 0 0 0 0
34442- 0 0 0 0 0 0 0 0 0 0 0 0
34443- 0 0 0 0 0 0 0 0 0 0 0 0
34444- 0 0 0 0 0 0 0 0 0 0 0 0
34445- 0 0 0 0 0 0 0 0 0 0 0 0
34446- 0 0 0 0 0 0 0 0 0 0 0 0
34447- 0 0 0 6 6 6 14 14 14 42 42 42
34448- 90 90 90 18 18 18 18 18 18 26 26 26
34449- 2 2 6 116 116 116 253 253 253 253 253 253
34450-253 253 253 253 253 253 253 253 253 253 253 253
34451-253 253 253 253 253 253 250 250 250 238 238 238
34452-253 253 253 253 253 253 253 253 253 253 253 253
34453-253 253 253 253 253 253 253 253 253 253 253 253
34454-253 253 253 253 253 253 253 253 253 253 253 253
34455-253 253 253 253 253 253 253 253 253 253 253 253
34456-253 253 253 253 253 253 94 94 94 6 6 6
34457- 2 2 6 2 2 6 10 10 10 34 34 34
34458- 2 2 6 2 2 6 2 2 6 2 2 6
34459- 74 74 74 58 58 58 22 22 22 6 6 6
34460- 0 0 0 0 0 0 0 0 0 0 0 0
34461- 0 0 0 0 0 0 0 0 0 0 0 0
34462- 0 0 0 0 0 0 0 0 0 0 0 0
34463- 0 0 0 0 0 0 0 0 0 0 0 0
34464- 0 0 0 0 0 0 0 0 0 0 0 0
34465- 0 0 0 0 0 0 0 0 0 0 0 0
34466- 0 0 0 0 0 0 0 0 0 0 0 0
34467- 0 0 0 10 10 10 26 26 26 66 66 66
34468- 82 82 82 2 2 6 38 38 38 6 6 6
34469- 14 14 14 210 210 210 253 253 253 253 253 253
34470-253 253 253 253 253 253 253 253 253 253 253 253
34471-253 253 253 253 253 253 246 246 246 242 242 242
34472-253 253 253 253 253 253 253 253 253 253 253 253
34473-253 253 253 253 253 253 253 253 253 253 253 253
34474-253 253 253 253 253 253 253 253 253 253 253 253
34475-253 253 253 253 253 253 253 253 253 253 253 253
34476-253 253 253 253 253 253 144 144 144 2 2 6
34477- 2 2 6 2 2 6 2 2 6 46 46 46
34478- 2 2 6 2 2 6 2 2 6 2 2 6
34479- 42 42 42 74 74 74 30 30 30 10 10 10
34480- 0 0 0 0 0 0 0 0 0 0 0 0
34481- 0 0 0 0 0 0 0 0 0 0 0 0
34482- 0 0 0 0 0 0 0 0 0 0 0 0
34483- 0 0 0 0 0 0 0 0 0 0 0 0
34484- 0 0 0 0 0 0 0 0 0 0 0 0
34485- 0 0 0 0 0 0 0 0 0 0 0 0
34486- 0 0 0 0 0 0 0 0 0 0 0 0
34487- 6 6 6 14 14 14 42 42 42 90 90 90
34488- 26 26 26 6 6 6 42 42 42 2 2 6
34489- 74 74 74 250 250 250 253 253 253 253 253 253
34490-253 253 253 253 253 253 253 253 253 253 253 253
34491-253 253 253 253 253 253 242 242 242 242 242 242
34492-253 253 253 253 253 253 253 253 253 253 253 253
34493-253 253 253 253 253 253 253 253 253 253 253 253
34494-253 253 253 253 253 253 253 253 253 253 253 253
34495-253 253 253 253 253 253 253 253 253 253 253 253
34496-253 253 253 253 253 253 182 182 182 2 2 6
34497- 2 2 6 2 2 6 2 2 6 46 46 46
34498- 2 2 6 2 2 6 2 2 6 2 2 6
34499- 10 10 10 86 86 86 38 38 38 10 10 10
34500- 0 0 0 0 0 0 0 0 0 0 0 0
34501- 0 0 0 0 0 0 0 0 0 0 0 0
34502- 0 0 0 0 0 0 0 0 0 0 0 0
34503- 0 0 0 0 0 0 0 0 0 0 0 0
34504- 0 0 0 0 0 0 0 0 0 0 0 0
34505- 0 0 0 0 0 0 0 0 0 0 0 0
34506- 0 0 0 0 0 0 0 0 0 0 0 0
34507- 10 10 10 26 26 26 66 66 66 82 82 82
34508- 2 2 6 22 22 22 18 18 18 2 2 6
34509-149 149 149 253 253 253 253 253 253 253 253 253
34510-253 253 253 253 253 253 253 253 253 253 253 253
34511-253 253 253 253 253 253 234 234 234 242 242 242
34512-253 253 253 253 253 253 253 253 253 253 253 253
34513-253 253 253 253 253 253 253 253 253 253 253 253
34514-253 253 253 253 253 253 253 253 253 253 253 253
34515-253 253 253 253 253 253 253 253 253 253 253 253
34516-253 253 253 253 253 253 206 206 206 2 2 6
34517- 2 2 6 2 2 6 2 2 6 38 38 38
34518- 2 2 6 2 2 6 2 2 6 2 2 6
34519- 6 6 6 86 86 86 46 46 46 14 14 14
34520- 0 0 0 0 0 0 0 0 0 0 0 0
34521- 0 0 0 0 0 0 0 0 0 0 0 0
34522- 0 0 0 0 0 0 0 0 0 0 0 0
34523- 0 0 0 0 0 0 0 0 0 0 0 0
34524- 0 0 0 0 0 0 0 0 0 0 0 0
34525- 0 0 0 0 0 0 0 0 0 0 0 0
34526- 0 0 0 0 0 0 0 0 0 6 6 6
34527- 18 18 18 46 46 46 86 86 86 18 18 18
34528- 2 2 6 34 34 34 10 10 10 6 6 6
34529-210 210 210 253 253 253 253 253 253 253 253 253
34530-253 253 253 253 253 253 253 253 253 253 253 253
34531-253 253 253 253 253 253 234 234 234 242 242 242
34532-253 253 253 253 253 253 253 253 253 253 253 253
34533-253 253 253 253 253 253 253 253 253 253 253 253
34534-253 253 253 253 253 253 253 253 253 253 253 253
34535-253 253 253 253 253 253 253 253 253 253 253 253
34536-253 253 253 253 253 253 221 221 221 6 6 6
34537- 2 2 6 2 2 6 6 6 6 30 30 30
34538- 2 2 6 2 2 6 2 2 6 2 2 6
34539- 2 2 6 82 82 82 54 54 54 18 18 18
34540- 6 6 6 0 0 0 0 0 0 0 0 0
34541- 0 0 0 0 0 0 0 0 0 0 0 0
34542- 0 0 0 0 0 0 0 0 0 0 0 0
34543- 0 0 0 0 0 0 0 0 0 0 0 0
34544- 0 0 0 0 0 0 0 0 0 0 0 0
34545- 0 0 0 0 0 0 0 0 0 0 0 0
34546- 0 0 0 0 0 0 0 0 0 10 10 10
34547- 26 26 26 66 66 66 62 62 62 2 2 6
34548- 2 2 6 38 38 38 10 10 10 26 26 26
34549-238 238 238 253 253 253 253 253 253 253 253 253
34550-253 253 253 253 253 253 253 253 253 253 253 253
34551-253 253 253 253 253 253 231 231 231 238 238 238
34552-253 253 253 253 253 253 253 253 253 253 253 253
34553-253 253 253 253 253 253 253 253 253 253 253 253
34554-253 253 253 253 253 253 253 253 253 253 253 253
34555-253 253 253 253 253 253 253 253 253 253 253 253
34556-253 253 253 253 253 253 231 231 231 6 6 6
34557- 2 2 6 2 2 6 10 10 10 30 30 30
34558- 2 2 6 2 2 6 2 2 6 2 2 6
34559- 2 2 6 66 66 66 58 58 58 22 22 22
34560- 6 6 6 0 0 0 0 0 0 0 0 0
34561- 0 0 0 0 0 0 0 0 0 0 0 0
34562- 0 0 0 0 0 0 0 0 0 0 0 0
34563- 0 0 0 0 0 0 0 0 0 0 0 0
34564- 0 0 0 0 0 0 0 0 0 0 0 0
34565- 0 0 0 0 0 0 0 0 0 0 0 0
34566- 0 0 0 0 0 0 0 0 0 10 10 10
34567- 38 38 38 78 78 78 6 6 6 2 2 6
34568- 2 2 6 46 46 46 14 14 14 42 42 42
34569-246 246 246 253 253 253 253 253 253 253 253 253
34570-253 253 253 253 253 253 253 253 253 253 253 253
34571-253 253 253 253 253 253 231 231 231 242 242 242
34572-253 253 253 253 253 253 253 253 253 253 253 253
34573-253 253 253 253 253 253 253 253 253 253 253 253
34574-253 253 253 253 253 253 253 253 253 253 253 253
34575-253 253 253 253 253 253 253 253 253 253 253 253
34576-253 253 253 253 253 253 234 234 234 10 10 10
34577- 2 2 6 2 2 6 22 22 22 14 14 14
34578- 2 2 6 2 2 6 2 2 6 2 2 6
34579- 2 2 6 66 66 66 62 62 62 22 22 22
34580- 6 6 6 0 0 0 0 0 0 0 0 0
34581- 0 0 0 0 0 0 0 0 0 0 0 0
34582- 0 0 0 0 0 0 0 0 0 0 0 0
34583- 0 0 0 0 0 0 0 0 0 0 0 0
34584- 0 0 0 0 0 0 0 0 0 0 0 0
34585- 0 0 0 0 0 0 0 0 0 0 0 0
34586- 0 0 0 0 0 0 6 6 6 18 18 18
34587- 50 50 50 74 74 74 2 2 6 2 2 6
34588- 14 14 14 70 70 70 34 34 34 62 62 62
34589-250 250 250 253 253 253 253 253 253 253 253 253
34590-253 253 253 253 253 253 253 253 253 253 253 253
34591-253 253 253 253 253 253 231 231 231 246 246 246
34592-253 253 253 253 253 253 253 253 253 253 253 253
34593-253 253 253 253 253 253 253 253 253 253 253 253
34594-253 253 253 253 253 253 253 253 253 253 253 253
34595-253 253 253 253 253 253 253 253 253 253 253 253
34596-253 253 253 253 253 253 234 234 234 14 14 14
34597- 2 2 6 2 2 6 30 30 30 2 2 6
34598- 2 2 6 2 2 6 2 2 6 2 2 6
34599- 2 2 6 66 66 66 62 62 62 22 22 22
34600- 6 6 6 0 0 0 0 0 0 0 0 0
34601- 0 0 0 0 0 0 0 0 0 0 0 0
34602- 0 0 0 0 0 0 0 0 0 0 0 0
34603- 0 0 0 0 0 0 0 0 0 0 0 0
34604- 0 0 0 0 0 0 0 0 0 0 0 0
34605- 0 0 0 0 0 0 0 0 0 0 0 0
34606- 0 0 0 0 0 0 6 6 6 18 18 18
34607- 54 54 54 62 62 62 2 2 6 2 2 6
34608- 2 2 6 30 30 30 46 46 46 70 70 70
34609-250 250 250 253 253 253 253 253 253 253 253 253
34610-253 253 253 253 253 253 253 253 253 253 253 253
34611-253 253 253 253 253 253 231 231 231 246 246 246
34612-253 253 253 253 253 253 253 253 253 253 253 253
34613-253 253 253 253 253 253 253 253 253 253 253 253
34614-253 253 253 253 253 253 253 253 253 253 253 253
34615-253 253 253 253 253 253 253 253 253 253 253 253
34616-253 253 253 253 253 253 226 226 226 10 10 10
34617- 2 2 6 6 6 6 30 30 30 2 2 6
34618- 2 2 6 2 2 6 2 2 6 2 2 6
34619- 2 2 6 66 66 66 58 58 58 22 22 22
34620- 6 6 6 0 0 0 0 0 0 0 0 0
34621- 0 0 0 0 0 0 0 0 0 0 0 0
34622- 0 0 0 0 0 0 0 0 0 0 0 0
34623- 0 0 0 0 0 0 0 0 0 0 0 0
34624- 0 0 0 0 0 0 0 0 0 0 0 0
34625- 0 0 0 0 0 0 0 0 0 0 0 0
34626- 0 0 0 0 0 0 6 6 6 22 22 22
34627- 58 58 58 62 62 62 2 2 6 2 2 6
34628- 2 2 6 2 2 6 30 30 30 78 78 78
34629-250 250 250 253 253 253 253 253 253 253 253 253
34630-253 253 253 253 253 253 253 253 253 253 253 253
34631-253 253 253 253 253 253 231 231 231 246 246 246
34632-253 253 253 253 253 253 253 253 253 253 253 253
34633-253 253 253 253 253 253 253 253 253 253 253 253
34634-253 253 253 253 253 253 253 253 253 253 253 253
34635-253 253 253 253 253 253 253 253 253 253 253 253
34636-253 253 253 253 253 253 206 206 206 2 2 6
34637- 22 22 22 34 34 34 18 14 6 22 22 22
34638- 26 26 26 18 18 18 6 6 6 2 2 6
34639- 2 2 6 82 82 82 54 54 54 18 18 18
34640- 6 6 6 0 0 0 0 0 0 0 0 0
34641- 0 0 0 0 0 0 0 0 0 0 0 0
34642- 0 0 0 0 0 0 0 0 0 0 0 0
34643- 0 0 0 0 0 0 0 0 0 0 0 0
34644- 0 0 0 0 0 0 0 0 0 0 0 0
34645- 0 0 0 0 0 0 0 0 0 0 0 0
34646- 0 0 0 0 0 0 6 6 6 26 26 26
34647- 62 62 62 106 106 106 74 54 14 185 133 11
34648-210 162 10 121 92 8 6 6 6 62 62 62
34649-238 238 238 253 253 253 253 253 253 253 253 253
34650-253 253 253 253 253 253 253 253 253 253 253 253
34651-253 253 253 253 253 253 231 231 231 246 246 246
34652-253 253 253 253 253 253 253 253 253 253 253 253
34653-253 253 253 253 253 253 253 253 253 253 253 253
34654-253 253 253 253 253 253 253 253 253 253 253 253
34655-253 253 253 253 253 253 253 253 253 253 253 253
34656-253 253 253 253 253 253 158 158 158 18 18 18
34657- 14 14 14 2 2 6 2 2 6 2 2 6
34658- 6 6 6 18 18 18 66 66 66 38 38 38
34659- 6 6 6 94 94 94 50 50 50 18 18 18
34660- 6 6 6 0 0 0 0 0 0 0 0 0
34661- 0 0 0 0 0 0 0 0 0 0 0 0
34662- 0 0 0 0 0 0 0 0 0 0 0 0
34663- 0 0 0 0 0 0 0 0 0 0 0 0
34664- 0 0 0 0 0 0 0 0 0 0 0 0
34665- 0 0 0 0 0 0 0 0 0 6 6 6
34666- 10 10 10 10 10 10 18 18 18 38 38 38
34667- 78 78 78 142 134 106 216 158 10 242 186 14
34668-246 190 14 246 190 14 156 118 10 10 10 10
34669- 90 90 90 238 238 238 253 253 253 253 253 253
34670-253 253 253 253 253 253 253 253 253 253 253 253
34671-253 253 253 253 253 253 231 231 231 250 250 250
34672-253 253 253 253 253 253 253 253 253 253 253 253
34673-253 253 253 253 253 253 253 253 253 253 253 253
34674-253 253 253 253 253 253 253 253 253 253 253 253
34675-253 253 253 253 253 253 253 253 253 246 230 190
34676-238 204 91 238 204 91 181 142 44 37 26 9
34677- 2 2 6 2 2 6 2 2 6 2 2 6
34678- 2 2 6 2 2 6 38 38 38 46 46 46
34679- 26 26 26 106 106 106 54 54 54 18 18 18
34680- 6 6 6 0 0 0 0 0 0 0 0 0
34681- 0 0 0 0 0 0 0 0 0 0 0 0
34682- 0 0 0 0 0 0 0 0 0 0 0 0
34683- 0 0 0 0 0 0 0 0 0 0 0 0
34684- 0 0 0 0 0 0 0 0 0 0 0 0
34685- 0 0 0 6 6 6 14 14 14 22 22 22
34686- 30 30 30 38 38 38 50 50 50 70 70 70
34687-106 106 106 190 142 34 226 170 11 242 186 14
34688-246 190 14 246 190 14 246 190 14 154 114 10
34689- 6 6 6 74 74 74 226 226 226 253 253 253
34690-253 253 253 253 253 253 253 253 253 253 253 253
34691-253 253 253 253 253 253 231 231 231 250 250 250
34692-253 253 253 253 253 253 253 253 253 253 253 253
34693-253 253 253 253 253 253 253 253 253 253 253 253
34694-253 253 253 253 253 253 253 253 253 253 253 253
34695-253 253 253 253 253 253 253 253 253 228 184 62
34696-241 196 14 241 208 19 232 195 16 38 30 10
34697- 2 2 6 2 2 6 2 2 6 2 2 6
34698- 2 2 6 6 6 6 30 30 30 26 26 26
34699-203 166 17 154 142 90 66 66 66 26 26 26
34700- 6 6 6 0 0 0 0 0 0 0 0 0
34701- 0 0 0 0 0 0 0 0 0 0 0 0
34702- 0 0 0 0 0 0 0 0 0 0 0 0
34703- 0 0 0 0 0 0 0 0 0 0 0 0
34704- 0 0 0 0 0 0 0 0 0 0 0 0
34705- 6 6 6 18 18 18 38 38 38 58 58 58
34706- 78 78 78 86 86 86 101 101 101 123 123 123
34707-175 146 61 210 150 10 234 174 13 246 186 14
34708-246 190 14 246 190 14 246 190 14 238 190 10
34709-102 78 10 2 2 6 46 46 46 198 198 198
34710-253 253 253 253 253 253 253 253 253 253 253 253
34711-253 253 253 253 253 253 234 234 234 242 242 242
34712-253 253 253 253 253 253 253 253 253 253 253 253
34713-253 253 253 253 253 253 253 253 253 253 253 253
34714-253 253 253 253 253 253 253 253 253 253 253 253
34715-253 253 253 253 253 253 253 253 253 224 178 62
34716-242 186 14 241 196 14 210 166 10 22 18 6
34717- 2 2 6 2 2 6 2 2 6 2 2 6
34718- 2 2 6 2 2 6 6 6 6 121 92 8
34719-238 202 15 232 195 16 82 82 82 34 34 34
34720- 10 10 10 0 0 0 0 0 0 0 0 0
34721- 0 0 0 0 0 0 0 0 0 0 0 0
34722- 0 0 0 0 0 0 0 0 0 0 0 0
34723- 0 0 0 0 0 0 0 0 0 0 0 0
34724- 0 0 0 0 0 0 0 0 0 0 0 0
34725- 14 14 14 38 38 38 70 70 70 154 122 46
34726-190 142 34 200 144 11 197 138 11 197 138 11
34727-213 154 11 226 170 11 242 186 14 246 190 14
34728-246 190 14 246 190 14 246 190 14 246 190 14
34729-225 175 15 46 32 6 2 2 6 22 22 22
34730-158 158 158 250 250 250 253 253 253 253 253 253
34731-253 253 253 253 253 253 253 253 253 253 253 253
34732-253 253 253 253 253 253 253 253 253 253 253 253
34733-253 253 253 253 253 253 253 253 253 253 253 253
34734-253 253 253 253 253 253 253 253 253 253 253 253
34735-253 253 253 250 250 250 242 242 242 224 178 62
34736-239 182 13 236 186 11 213 154 11 46 32 6
34737- 2 2 6 2 2 6 2 2 6 2 2 6
34738- 2 2 6 2 2 6 61 42 6 225 175 15
34739-238 190 10 236 186 11 112 100 78 42 42 42
34740- 14 14 14 0 0 0 0 0 0 0 0 0
34741- 0 0 0 0 0 0 0 0 0 0 0 0
34742- 0 0 0 0 0 0 0 0 0 0 0 0
34743- 0 0 0 0 0 0 0 0 0 0 0 0
34744- 0 0 0 0 0 0 0 0 0 6 6 6
34745- 22 22 22 54 54 54 154 122 46 213 154 11
34746-226 170 11 230 174 11 226 170 11 226 170 11
34747-236 178 12 242 186 14 246 190 14 246 190 14
34748-246 190 14 246 190 14 246 190 14 246 190 14
34749-241 196 14 184 144 12 10 10 10 2 2 6
34750- 6 6 6 116 116 116 242 242 242 253 253 253
34751-253 253 253 253 253 253 253 253 253 253 253 253
34752-253 253 253 253 253 253 253 253 253 253 253 253
34753-253 253 253 253 253 253 253 253 253 253 253 253
34754-253 253 253 253 253 253 253 253 253 253 253 253
34755-253 253 253 231 231 231 198 198 198 214 170 54
34756-236 178 12 236 178 12 210 150 10 137 92 6
34757- 18 14 6 2 2 6 2 2 6 2 2 6
34758- 6 6 6 70 47 6 200 144 11 236 178 12
34759-239 182 13 239 182 13 124 112 88 58 58 58
34760- 22 22 22 6 6 6 0 0 0 0 0 0
34761- 0 0 0 0 0 0 0 0 0 0 0 0
34762- 0 0 0 0 0 0 0 0 0 0 0 0
34763- 0 0 0 0 0 0 0 0 0 0 0 0
34764- 0 0 0 0 0 0 0 0 0 10 10 10
34765- 30 30 30 70 70 70 180 133 36 226 170 11
34766-239 182 13 242 186 14 242 186 14 246 186 14
34767-246 190 14 246 190 14 246 190 14 246 190 14
34768-246 190 14 246 190 14 246 190 14 246 190 14
34769-246 190 14 232 195 16 98 70 6 2 2 6
34770- 2 2 6 2 2 6 66 66 66 221 221 221
34771-253 253 253 253 253 253 253 253 253 253 253 253
34772-253 253 253 253 253 253 253 253 253 253 253 253
34773-253 253 253 253 253 253 253 253 253 253 253 253
34774-253 253 253 253 253 253 253 253 253 253 253 253
34775-253 253 253 206 206 206 198 198 198 214 166 58
34776-230 174 11 230 174 11 216 158 10 192 133 9
34777-163 110 8 116 81 8 102 78 10 116 81 8
34778-167 114 7 197 138 11 226 170 11 239 182 13
34779-242 186 14 242 186 14 162 146 94 78 78 78
34780- 34 34 34 14 14 14 6 6 6 0 0 0
34781- 0 0 0 0 0 0 0 0 0 0 0 0
34782- 0 0 0 0 0 0 0 0 0 0 0 0
34783- 0 0 0 0 0 0 0 0 0 0 0 0
34784- 0 0 0 0 0 0 0 0 0 6 6 6
34785- 30 30 30 78 78 78 190 142 34 226 170 11
34786-239 182 13 246 190 14 246 190 14 246 190 14
34787-246 190 14 246 190 14 246 190 14 246 190 14
34788-246 190 14 246 190 14 246 190 14 246 190 14
34789-246 190 14 241 196 14 203 166 17 22 18 6
34790- 2 2 6 2 2 6 2 2 6 38 38 38
34791-218 218 218 253 253 253 253 253 253 253 253 253
34792-253 253 253 253 253 253 253 253 253 253 253 253
34793-253 253 253 253 253 253 253 253 253 253 253 253
34794-253 253 253 253 253 253 253 253 253 253 253 253
34795-250 250 250 206 206 206 198 198 198 202 162 69
34796-226 170 11 236 178 12 224 166 10 210 150 10
34797-200 144 11 197 138 11 192 133 9 197 138 11
34798-210 150 10 226 170 11 242 186 14 246 190 14
34799-246 190 14 246 186 14 225 175 15 124 112 88
34800- 62 62 62 30 30 30 14 14 14 6 6 6
34801- 0 0 0 0 0 0 0 0 0 0 0 0
34802- 0 0 0 0 0 0 0 0 0 0 0 0
34803- 0 0 0 0 0 0 0 0 0 0 0 0
34804- 0 0 0 0 0 0 0 0 0 10 10 10
34805- 30 30 30 78 78 78 174 135 50 224 166 10
34806-239 182 13 246 190 14 246 190 14 246 190 14
34807-246 190 14 246 190 14 246 190 14 246 190 14
34808-246 190 14 246 190 14 246 190 14 246 190 14
34809-246 190 14 246 190 14 241 196 14 139 102 15
34810- 2 2 6 2 2 6 2 2 6 2 2 6
34811- 78 78 78 250 250 250 253 253 253 253 253 253
34812-253 253 253 253 253 253 253 253 253 253 253 253
34813-253 253 253 253 253 253 253 253 253 253 253 253
34814-253 253 253 253 253 253 253 253 253 253 253 253
34815-250 250 250 214 214 214 198 198 198 190 150 46
34816-219 162 10 236 178 12 234 174 13 224 166 10
34817-216 158 10 213 154 11 213 154 11 216 158 10
34818-226 170 11 239 182 13 246 190 14 246 190 14
34819-246 190 14 246 190 14 242 186 14 206 162 42
34820-101 101 101 58 58 58 30 30 30 14 14 14
34821- 6 6 6 0 0 0 0 0 0 0 0 0
34822- 0 0 0 0 0 0 0 0 0 0 0 0
34823- 0 0 0 0 0 0 0 0 0 0 0 0
34824- 0 0 0 0 0 0 0 0 0 10 10 10
34825- 30 30 30 74 74 74 174 135 50 216 158 10
34826-236 178 12 246 190 14 246 190 14 246 190 14
34827-246 190 14 246 190 14 246 190 14 246 190 14
34828-246 190 14 246 190 14 246 190 14 246 190 14
34829-246 190 14 246 190 14 241 196 14 226 184 13
34830- 61 42 6 2 2 6 2 2 6 2 2 6
34831- 22 22 22 238 238 238 253 253 253 253 253 253
34832-253 253 253 253 253 253 253 253 253 253 253 253
34833-253 253 253 253 253 253 253 253 253 253 253 253
34834-253 253 253 253 253 253 253 253 253 253 253 253
34835-253 253 253 226 226 226 187 187 187 180 133 36
34836-216 158 10 236 178 12 239 182 13 236 178 12
34837-230 174 11 226 170 11 226 170 11 230 174 11
34838-236 178 12 242 186 14 246 190 14 246 190 14
34839-246 190 14 246 190 14 246 186 14 239 182 13
34840-206 162 42 106 106 106 66 66 66 34 34 34
34841- 14 14 14 6 6 6 0 0 0 0 0 0
34842- 0 0 0 0 0 0 0 0 0 0 0 0
34843- 0 0 0 0 0 0 0 0 0 0 0 0
34844- 0 0 0 0 0 0 0 0 0 6 6 6
34845- 26 26 26 70 70 70 163 133 67 213 154 11
34846-236 178 12 246 190 14 246 190 14 246 190 14
34847-246 190 14 246 190 14 246 190 14 246 190 14
34848-246 190 14 246 190 14 246 190 14 246 190 14
34849-246 190 14 246 190 14 246 190 14 241 196 14
34850-190 146 13 18 14 6 2 2 6 2 2 6
34851- 46 46 46 246 246 246 253 253 253 253 253 253
34852-253 253 253 253 253 253 253 253 253 253 253 253
34853-253 253 253 253 253 253 253 253 253 253 253 253
34854-253 253 253 253 253 253 253 253 253 253 253 253
34855-253 253 253 221 221 221 86 86 86 156 107 11
34856-216 158 10 236 178 12 242 186 14 246 186 14
34857-242 186 14 239 182 13 239 182 13 242 186 14
34858-242 186 14 246 186 14 246 190 14 246 190 14
34859-246 190 14 246 190 14 246 190 14 246 190 14
34860-242 186 14 225 175 15 142 122 72 66 66 66
34861- 30 30 30 10 10 10 0 0 0 0 0 0
34862- 0 0 0 0 0 0 0 0 0 0 0 0
34863- 0 0 0 0 0 0 0 0 0 0 0 0
34864- 0 0 0 0 0 0 0 0 0 6 6 6
34865- 26 26 26 70 70 70 163 133 67 210 150 10
34866-236 178 12 246 190 14 246 190 14 246 190 14
34867-246 190 14 246 190 14 246 190 14 246 190 14
34868-246 190 14 246 190 14 246 190 14 246 190 14
34869-246 190 14 246 190 14 246 190 14 246 190 14
34870-232 195 16 121 92 8 34 34 34 106 106 106
34871-221 221 221 253 253 253 253 253 253 253 253 253
34872-253 253 253 253 253 253 253 253 253 253 253 253
34873-253 253 253 253 253 253 253 253 253 253 253 253
34874-253 253 253 253 253 253 253 253 253 253 253 253
34875-242 242 242 82 82 82 18 14 6 163 110 8
34876-216 158 10 236 178 12 242 186 14 246 190 14
34877-246 190 14 246 190 14 246 190 14 246 190 14
34878-246 190 14 246 190 14 246 190 14 246 190 14
34879-246 190 14 246 190 14 246 190 14 246 190 14
34880-246 190 14 246 190 14 242 186 14 163 133 67
34881- 46 46 46 18 18 18 6 6 6 0 0 0
34882- 0 0 0 0 0 0 0 0 0 0 0 0
34883- 0 0 0 0 0 0 0 0 0 0 0 0
34884- 0 0 0 0 0 0 0 0 0 10 10 10
34885- 30 30 30 78 78 78 163 133 67 210 150 10
34886-236 178 12 246 186 14 246 190 14 246 190 14
34887-246 190 14 246 190 14 246 190 14 246 190 14
34888-246 190 14 246 190 14 246 190 14 246 190 14
34889-246 190 14 246 190 14 246 190 14 246 190 14
34890-241 196 14 215 174 15 190 178 144 253 253 253
34891-253 253 253 253 253 253 253 253 253 253 253 253
34892-253 253 253 253 253 253 253 253 253 253 253 253
34893-253 253 253 253 253 253 253 253 253 253 253 253
34894-253 253 253 253 253 253 253 253 253 218 218 218
34895- 58 58 58 2 2 6 22 18 6 167 114 7
34896-216 158 10 236 178 12 246 186 14 246 190 14
34897-246 190 14 246 190 14 246 190 14 246 190 14
34898-246 190 14 246 190 14 246 190 14 246 190 14
34899-246 190 14 246 190 14 246 190 14 246 190 14
34900-246 190 14 246 186 14 242 186 14 190 150 46
34901- 54 54 54 22 22 22 6 6 6 0 0 0
34902- 0 0 0 0 0 0 0 0 0 0 0 0
34903- 0 0 0 0 0 0 0 0 0 0 0 0
34904- 0 0 0 0 0 0 0 0 0 14 14 14
34905- 38 38 38 86 86 86 180 133 36 213 154 11
34906-236 178 12 246 186 14 246 190 14 246 190 14
34907-246 190 14 246 190 14 246 190 14 246 190 14
34908-246 190 14 246 190 14 246 190 14 246 190 14
34909-246 190 14 246 190 14 246 190 14 246 190 14
34910-246 190 14 232 195 16 190 146 13 214 214 214
34911-253 253 253 253 253 253 253 253 253 253 253 253
34912-253 253 253 253 253 253 253 253 253 253 253 253
34913-253 253 253 253 253 253 253 253 253 253 253 253
34914-253 253 253 250 250 250 170 170 170 26 26 26
34915- 2 2 6 2 2 6 37 26 9 163 110 8
34916-219 162 10 239 182 13 246 186 14 246 190 14
34917-246 190 14 246 190 14 246 190 14 246 190 14
34918-246 190 14 246 190 14 246 190 14 246 190 14
34919-246 190 14 246 190 14 246 190 14 246 190 14
34920-246 186 14 236 178 12 224 166 10 142 122 72
34921- 46 46 46 18 18 18 6 6 6 0 0 0
34922- 0 0 0 0 0 0 0 0 0 0 0 0
34923- 0 0 0 0 0 0 0 0 0 0 0 0
34924- 0 0 0 0 0 0 6 6 6 18 18 18
34925- 50 50 50 109 106 95 192 133 9 224 166 10
34926-242 186 14 246 190 14 246 190 14 246 190 14
34927-246 190 14 246 190 14 246 190 14 246 190 14
34928-246 190 14 246 190 14 246 190 14 246 190 14
34929-246 190 14 246 190 14 246 190 14 246 190 14
34930-242 186 14 226 184 13 210 162 10 142 110 46
34931-226 226 226 253 253 253 253 253 253 253 253 253
34932-253 253 253 253 253 253 253 253 253 253 253 253
34933-253 253 253 253 253 253 253 253 253 253 253 253
34934-198 198 198 66 66 66 2 2 6 2 2 6
34935- 2 2 6 2 2 6 50 34 6 156 107 11
34936-219 162 10 239 182 13 246 186 14 246 190 14
34937-246 190 14 246 190 14 246 190 14 246 190 14
34938-246 190 14 246 190 14 246 190 14 246 190 14
34939-246 190 14 246 190 14 246 190 14 242 186 14
34940-234 174 13 213 154 11 154 122 46 66 66 66
34941- 30 30 30 10 10 10 0 0 0 0 0 0
34942- 0 0 0 0 0 0 0 0 0 0 0 0
34943- 0 0 0 0 0 0 0 0 0 0 0 0
34944- 0 0 0 0 0 0 6 6 6 22 22 22
34945- 58 58 58 154 121 60 206 145 10 234 174 13
34946-242 186 14 246 186 14 246 190 14 246 190 14
34947-246 190 14 246 190 14 246 190 14 246 190 14
34948-246 190 14 246 190 14 246 190 14 246 190 14
34949-246 190 14 246 190 14 246 190 14 246 190 14
34950-246 186 14 236 178 12 210 162 10 163 110 8
34951- 61 42 6 138 138 138 218 218 218 250 250 250
34952-253 253 253 253 253 253 253 253 253 250 250 250
34953-242 242 242 210 210 210 144 144 144 66 66 66
34954- 6 6 6 2 2 6 2 2 6 2 2 6
34955- 2 2 6 2 2 6 61 42 6 163 110 8
34956-216 158 10 236 178 12 246 190 14 246 190 14
34957-246 190 14 246 190 14 246 190 14 246 190 14
34958-246 190 14 246 190 14 246 190 14 246 190 14
34959-246 190 14 239 182 13 230 174 11 216 158 10
34960-190 142 34 124 112 88 70 70 70 38 38 38
34961- 18 18 18 6 6 6 0 0 0 0 0 0
34962- 0 0 0 0 0 0 0 0 0 0 0 0
34963- 0 0 0 0 0 0 0 0 0 0 0 0
34964- 0 0 0 0 0 0 6 6 6 22 22 22
34965- 62 62 62 168 124 44 206 145 10 224 166 10
34966-236 178 12 239 182 13 242 186 14 242 186 14
34967-246 186 14 246 190 14 246 190 14 246 190 14
34968-246 190 14 246 190 14 246 190 14 246 190 14
34969-246 190 14 246 190 14 246 190 14 246 190 14
34970-246 190 14 236 178 12 216 158 10 175 118 6
34971- 80 54 7 2 2 6 6 6 6 30 30 30
34972- 54 54 54 62 62 62 50 50 50 38 38 38
34973- 14 14 14 2 2 6 2 2 6 2 2 6
34974- 2 2 6 2 2 6 2 2 6 2 2 6
34975- 2 2 6 6 6 6 80 54 7 167 114 7
34976-213 154 11 236 178 12 246 190 14 246 190 14
34977-246 190 14 246 190 14 246 190 14 246 190 14
34978-246 190 14 242 186 14 239 182 13 239 182 13
34979-230 174 11 210 150 10 174 135 50 124 112 88
34980- 82 82 82 54 54 54 34 34 34 18 18 18
34981- 6 6 6 0 0 0 0 0 0 0 0 0
34982- 0 0 0 0 0 0 0 0 0 0 0 0
34983- 0 0 0 0 0 0 0 0 0 0 0 0
34984- 0 0 0 0 0 0 6 6 6 18 18 18
34985- 50 50 50 158 118 36 192 133 9 200 144 11
34986-216 158 10 219 162 10 224 166 10 226 170 11
34987-230 174 11 236 178 12 239 182 13 239 182 13
34988-242 186 14 246 186 14 246 190 14 246 190 14
34989-246 190 14 246 190 14 246 190 14 246 190 14
34990-246 186 14 230 174 11 210 150 10 163 110 8
34991-104 69 6 10 10 10 2 2 6 2 2 6
34992- 2 2 6 2 2 6 2 2 6 2 2 6
34993- 2 2 6 2 2 6 2 2 6 2 2 6
34994- 2 2 6 2 2 6 2 2 6 2 2 6
34995- 2 2 6 6 6 6 91 60 6 167 114 7
34996-206 145 10 230 174 11 242 186 14 246 190 14
34997-246 190 14 246 190 14 246 186 14 242 186 14
34998-239 182 13 230 174 11 224 166 10 213 154 11
34999-180 133 36 124 112 88 86 86 86 58 58 58
35000- 38 38 38 22 22 22 10 10 10 6 6 6
35001- 0 0 0 0 0 0 0 0 0 0 0 0
35002- 0 0 0 0 0 0 0 0 0 0 0 0
35003- 0 0 0 0 0 0 0 0 0 0 0 0
35004- 0 0 0 0 0 0 0 0 0 14 14 14
35005- 34 34 34 70 70 70 138 110 50 158 118 36
35006-167 114 7 180 123 7 192 133 9 197 138 11
35007-200 144 11 206 145 10 213 154 11 219 162 10
35008-224 166 10 230 174 11 239 182 13 242 186 14
35009-246 186 14 246 186 14 246 186 14 246 186 14
35010-239 182 13 216 158 10 185 133 11 152 99 6
35011-104 69 6 18 14 6 2 2 6 2 2 6
35012- 2 2 6 2 2 6 2 2 6 2 2 6
35013- 2 2 6 2 2 6 2 2 6 2 2 6
35014- 2 2 6 2 2 6 2 2 6 2 2 6
35015- 2 2 6 6 6 6 80 54 7 152 99 6
35016-192 133 9 219 162 10 236 178 12 239 182 13
35017-246 186 14 242 186 14 239 182 13 236 178 12
35018-224 166 10 206 145 10 192 133 9 154 121 60
35019- 94 94 94 62 62 62 42 42 42 22 22 22
35020- 14 14 14 6 6 6 0 0 0 0 0 0
35021- 0 0 0 0 0 0 0 0 0 0 0 0
35022- 0 0 0 0 0 0 0 0 0 0 0 0
35023- 0 0 0 0 0 0 0 0 0 0 0 0
35024- 0 0 0 0 0 0 0 0 0 6 6 6
35025- 18 18 18 34 34 34 58 58 58 78 78 78
35026-101 98 89 124 112 88 142 110 46 156 107 11
35027-163 110 8 167 114 7 175 118 6 180 123 7
35028-185 133 11 197 138 11 210 150 10 219 162 10
35029-226 170 11 236 178 12 236 178 12 234 174 13
35030-219 162 10 197 138 11 163 110 8 130 83 6
35031- 91 60 6 10 10 10 2 2 6 2 2 6
35032- 18 18 18 38 38 38 38 38 38 38 38 38
35033- 38 38 38 38 38 38 38 38 38 38 38 38
35034- 38 38 38 38 38 38 26 26 26 2 2 6
35035- 2 2 6 6 6 6 70 47 6 137 92 6
35036-175 118 6 200 144 11 219 162 10 230 174 11
35037-234 174 13 230 174 11 219 162 10 210 150 10
35038-192 133 9 163 110 8 124 112 88 82 82 82
35039- 50 50 50 30 30 30 14 14 14 6 6 6
35040- 0 0 0 0 0 0 0 0 0 0 0 0
35041- 0 0 0 0 0 0 0 0 0 0 0 0
35042- 0 0 0 0 0 0 0 0 0 0 0 0
35043- 0 0 0 0 0 0 0 0 0 0 0 0
35044- 0 0 0 0 0 0 0 0 0 0 0 0
35045- 6 6 6 14 14 14 22 22 22 34 34 34
35046- 42 42 42 58 58 58 74 74 74 86 86 86
35047-101 98 89 122 102 70 130 98 46 121 87 25
35048-137 92 6 152 99 6 163 110 8 180 123 7
35049-185 133 11 197 138 11 206 145 10 200 144 11
35050-180 123 7 156 107 11 130 83 6 104 69 6
35051- 50 34 6 54 54 54 110 110 110 101 98 89
35052- 86 86 86 82 82 82 78 78 78 78 78 78
35053- 78 78 78 78 78 78 78 78 78 78 78 78
35054- 78 78 78 82 82 82 86 86 86 94 94 94
35055-106 106 106 101 101 101 86 66 34 124 80 6
35056-156 107 11 180 123 7 192 133 9 200 144 11
35057-206 145 10 200 144 11 192 133 9 175 118 6
35058-139 102 15 109 106 95 70 70 70 42 42 42
35059- 22 22 22 10 10 10 0 0 0 0 0 0
35060- 0 0 0 0 0 0 0 0 0 0 0 0
35061- 0 0 0 0 0 0 0 0 0 0 0 0
35062- 0 0 0 0 0 0 0 0 0 0 0 0
35063- 0 0 0 0 0 0 0 0 0 0 0 0
35064- 0 0 0 0 0 0 0 0 0 0 0 0
35065- 0 0 0 0 0 0 6 6 6 10 10 10
35066- 14 14 14 22 22 22 30 30 30 38 38 38
35067- 50 50 50 62 62 62 74 74 74 90 90 90
35068-101 98 89 112 100 78 121 87 25 124 80 6
35069-137 92 6 152 99 6 152 99 6 152 99 6
35070-138 86 6 124 80 6 98 70 6 86 66 30
35071-101 98 89 82 82 82 58 58 58 46 46 46
35072- 38 38 38 34 34 34 34 34 34 34 34 34
35073- 34 34 34 34 34 34 34 34 34 34 34 34
35074- 34 34 34 34 34 34 38 38 38 42 42 42
35075- 54 54 54 82 82 82 94 86 76 91 60 6
35076-134 86 6 156 107 11 167 114 7 175 118 6
35077-175 118 6 167 114 7 152 99 6 121 87 25
35078-101 98 89 62 62 62 34 34 34 18 18 18
35079- 6 6 6 0 0 0 0 0 0 0 0 0
35080- 0 0 0 0 0 0 0 0 0 0 0 0
35081- 0 0 0 0 0 0 0 0 0 0 0 0
35082- 0 0 0 0 0 0 0 0 0 0 0 0
35083- 0 0 0 0 0 0 0 0 0 0 0 0
35084- 0 0 0 0 0 0 0 0 0 0 0 0
35085- 0 0 0 0 0 0 0 0 0 0 0 0
35086- 0 0 0 6 6 6 6 6 6 10 10 10
35087- 18 18 18 22 22 22 30 30 30 42 42 42
35088- 50 50 50 66 66 66 86 86 86 101 98 89
35089-106 86 58 98 70 6 104 69 6 104 69 6
35090-104 69 6 91 60 6 82 62 34 90 90 90
35091- 62 62 62 38 38 38 22 22 22 14 14 14
35092- 10 10 10 10 10 10 10 10 10 10 10 10
35093- 10 10 10 10 10 10 6 6 6 10 10 10
35094- 10 10 10 10 10 10 10 10 10 14 14 14
35095- 22 22 22 42 42 42 70 70 70 89 81 66
35096- 80 54 7 104 69 6 124 80 6 137 92 6
35097-134 86 6 116 81 8 100 82 52 86 86 86
35098- 58 58 58 30 30 30 14 14 14 6 6 6
35099- 0 0 0 0 0 0 0 0 0 0 0 0
35100- 0 0 0 0 0 0 0 0 0 0 0 0
35101- 0 0 0 0 0 0 0 0 0 0 0 0
35102- 0 0 0 0 0 0 0 0 0 0 0 0
35103- 0 0 0 0 0 0 0 0 0 0 0 0
35104- 0 0 0 0 0 0 0 0 0 0 0 0
35105- 0 0 0 0 0 0 0 0 0 0 0 0
35106- 0 0 0 0 0 0 0 0 0 0 0 0
35107- 0 0 0 6 6 6 10 10 10 14 14 14
35108- 18 18 18 26 26 26 38 38 38 54 54 54
35109- 70 70 70 86 86 86 94 86 76 89 81 66
35110- 89 81 66 86 86 86 74 74 74 50 50 50
35111- 30 30 30 14 14 14 6 6 6 0 0 0
35112- 0 0 0 0 0 0 0 0 0 0 0 0
35113- 0 0 0 0 0 0 0 0 0 0 0 0
35114- 0 0 0 0 0 0 0 0 0 0 0 0
35115- 6 6 6 18 18 18 34 34 34 58 58 58
35116- 82 82 82 89 81 66 89 81 66 89 81 66
35117- 94 86 66 94 86 76 74 74 74 50 50 50
35118- 26 26 26 14 14 14 6 6 6 0 0 0
35119- 0 0 0 0 0 0 0 0 0 0 0 0
35120- 0 0 0 0 0 0 0 0 0 0 0 0
35121- 0 0 0 0 0 0 0 0 0 0 0 0
35122- 0 0 0 0 0 0 0 0 0 0 0 0
35123- 0 0 0 0 0 0 0 0 0 0 0 0
35124- 0 0 0 0 0 0 0 0 0 0 0 0
35125- 0 0 0 0 0 0 0 0 0 0 0 0
35126- 0 0 0 0 0 0 0 0 0 0 0 0
35127- 0 0 0 0 0 0 0 0 0 0 0 0
35128- 6 6 6 6 6 6 14 14 14 18 18 18
35129- 30 30 30 38 38 38 46 46 46 54 54 54
35130- 50 50 50 42 42 42 30 30 30 18 18 18
35131- 10 10 10 0 0 0 0 0 0 0 0 0
35132- 0 0 0 0 0 0 0 0 0 0 0 0
35133- 0 0 0 0 0 0 0 0 0 0 0 0
35134- 0 0 0 0 0 0 0 0 0 0 0 0
35135- 0 0 0 6 6 6 14 14 14 26 26 26
35136- 38 38 38 50 50 50 58 58 58 58 58 58
35137- 54 54 54 42 42 42 30 30 30 18 18 18
35138- 10 10 10 0 0 0 0 0 0 0 0 0
35139- 0 0 0 0 0 0 0 0 0 0 0 0
35140- 0 0 0 0 0 0 0 0 0 0 0 0
35141- 0 0 0 0 0 0 0 0 0 0 0 0
35142- 0 0 0 0 0 0 0 0 0 0 0 0
35143- 0 0 0 0 0 0 0 0 0 0 0 0
35144- 0 0 0 0 0 0 0 0 0 0 0 0
35145- 0 0 0 0 0 0 0 0 0 0 0 0
35146- 0 0 0 0 0 0 0 0 0 0 0 0
35147- 0 0 0 0 0 0 0 0 0 0 0 0
35148- 0 0 0 0 0 0 0 0 0 6 6 6
35149- 6 6 6 10 10 10 14 14 14 18 18 18
35150- 18 18 18 14 14 14 10 10 10 6 6 6
35151- 0 0 0 0 0 0 0 0 0 0 0 0
35152- 0 0 0 0 0 0 0 0 0 0 0 0
35153- 0 0 0 0 0 0 0 0 0 0 0 0
35154- 0 0 0 0 0 0 0 0 0 0 0 0
35155- 0 0 0 0 0 0 0 0 0 6 6 6
35156- 14 14 14 18 18 18 22 22 22 22 22 22
35157- 18 18 18 14 14 14 10 10 10 6 6 6
35158- 0 0 0 0 0 0 0 0 0 0 0 0
35159- 0 0 0 0 0 0 0 0 0 0 0 0
35160- 0 0 0 0 0 0 0 0 0 0 0 0
35161- 0 0 0 0 0 0 0 0 0 0 0 0
35162- 0 0 0 0 0 0 0 0 0 0 0 0
35163+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35164+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35165+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35166+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35167+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35168+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35169+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35170+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35171+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35172+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35173+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35174+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35175+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35176+4 4 4 4 4 4
35177+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35178+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35179+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35180+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35181+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35182+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35183+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35184+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35185+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35186+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35187+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35188+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35189+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35190+4 4 4 4 4 4
35191+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35192+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35193+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35194+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35195+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35196+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35197+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35198+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35199+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35200+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35201+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35202+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35203+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35204+4 4 4 4 4 4
35205+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35206+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35207+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35208+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35209+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35210+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35211+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35212+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35213+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35214+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35215+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35216+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35217+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35218+4 4 4 4 4 4
35219+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35220+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35221+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35222+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35223+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35224+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35225+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35226+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35227+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35228+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35229+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35230+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35231+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35232+4 4 4 4 4 4
35233+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35234+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35235+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35236+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35237+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35238+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35239+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35240+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35241+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35242+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35243+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35244+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35245+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35246+4 4 4 4 4 4
35247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35248+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35249+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35250+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35251+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
35252+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
35253+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35254+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35255+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35256+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
35257+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
35258+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
35259+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35260+4 4 4 4 4 4
35261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35262+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35263+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35264+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35265+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
35266+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
35267+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35268+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35269+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35270+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
35271+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
35272+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
35273+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35274+4 4 4 4 4 4
35275+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35276+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35277+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35278+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35279+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
35280+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
35281+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
35282+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35283+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35284+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
35285+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
35286+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
35287+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
35288+4 4 4 4 4 4
35289+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35290+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35291+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35292+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
35293+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
35294+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
35295+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
35296+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35297+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
35298+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
35299+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
35300+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
35301+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
35302+4 4 4 4 4 4
35303+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35304+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35305+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35306+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
35307+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
35308+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
35309+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
35310+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
35311+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
35312+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
35313+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
35314+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
35315+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
35316+4 4 4 4 4 4
35317+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35318+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35319+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
35320+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
35321+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
35322+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
35323+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
35324+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
35325+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
35326+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
35327+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
35328+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
35329+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
35330+4 4 4 4 4 4
35331+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35332+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35333+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
35334+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
35335+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
35336+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
35337+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
35338+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
35339+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
35340+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
35341+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
35342+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
35343+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
35344+4 4 4 4 4 4
35345+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35346+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35347+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
35348+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
35349+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
35350+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
35351+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
35352+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
35353+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
35354+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
35355+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
35356+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
35357+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
35358+4 4 4 4 4 4
35359+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35360+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35361+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
35362+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
35363+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
35364+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
35365+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
35366+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
35367+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
35368+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
35369+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
35370+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
35371+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
35372+4 4 4 4 4 4
35373+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35374+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35375+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
35376+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
35377+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
35378+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
35379+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
35380+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
35381+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
35382+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
35383+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
35384+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
35385+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
35386+4 4 4 4 4 4
35387+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35388+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
35389+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
35390+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
35391+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
35392+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
35393+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
35394+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
35395+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
35396+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
35397+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
35398+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
35399+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
35400+4 4 4 4 4 4
35401+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35402+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
35403+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
35404+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
35405+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
35406+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
35407+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
35408+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
35409+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
35410+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
35411+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
35412+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
35413+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
35414+0 0 0 4 4 4
35415+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
35416+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
35417+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
35418+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
35419+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
35420+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
35421+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
35422+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
35423+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
35424+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
35425+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
35426+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
35427+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
35428+2 0 0 0 0 0
35429+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
35430+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
35431+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
35432+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
35433+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
35434+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
35435+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
35436+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
35437+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
35438+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
35439+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
35440+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
35441+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
35442+37 38 37 0 0 0
35443+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
35444+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
35445+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
35446+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
35447+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
35448+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
35449+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
35450+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
35451+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
35452+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
35453+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
35454+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
35455+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
35456+85 115 134 4 0 0
35457+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
35458+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
35459+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
35460+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
35461+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
35462+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
35463+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
35464+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
35465+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
35466+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
35467+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
35468+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
35469+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
35470+60 73 81 4 0 0
35471+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
35472+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
35473+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
35474+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
35475+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
35476+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
35477+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
35478+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
35479+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
35480+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
35481+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
35482+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
35483+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
35484+16 19 21 4 0 0
35485+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
35486+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
35487+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
35488+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
35489+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
35490+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
35491+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
35492+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
35493+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
35494+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
35495+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
35496+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
35497+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
35498+4 0 0 4 3 3
35499+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
35500+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
35501+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
35502+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
35503+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
35504+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
35505+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
35506+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
35507+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
35508+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
35509+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
35510+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
35511+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
35512+3 2 2 4 4 4
35513+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
35514+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
35515+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
35516+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
35517+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
35518+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
35519+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
35520+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
35521+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
35522+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
35523+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
35524+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
35525+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
35526+4 4 4 4 4 4
35527+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
35528+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
35529+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
35530+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
35531+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
35532+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
35533+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
35534+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
35535+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
35536+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
35537+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
35538+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
35539+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
35540+4 4 4 4 4 4
35541+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
35542+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
35543+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
35544+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
35545+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
35546+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
35547+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
35548+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
35549+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
35550+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
35551+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
35552+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
35553+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
35554+5 5 5 5 5 5
35555+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
35556+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
35557+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
35558+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
35559+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
35560+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
35561+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
35562+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
35563+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
35564+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
35565+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
35566+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
35567+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
35568+5 5 5 4 4 4
35569+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
35570+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
35571+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
35572+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
35573+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
35574+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
35575+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
35576+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
35577+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
35578+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
35579+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
35580+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
35581+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35582+4 4 4 4 4 4
35583+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
35584+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
35585+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
35586+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
35587+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
35588+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
35589+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
35590+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
35591+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
35592+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
35593+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
35594+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
35595+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35596+4 4 4 4 4 4
35597+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
35598+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
35599+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
35600+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
35601+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
35602+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
35603+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
35604+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
35605+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
35606+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
35607+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
35608+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35609+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35610+4 4 4 4 4 4
35611+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
35612+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
35613+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
35614+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
35615+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
35616+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
35617+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
35618+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
35619+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
35620+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
35621+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
35622+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35623+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35624+4 4 4 4 4 4
35625+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
35626+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
35627+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
35628+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
35629+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
35630+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
35631+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
35632+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
35633+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
35634+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
35635+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35636+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35637+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35638+4 4 4 4 4 4
35639+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
35640+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
35641+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
35642+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
35643+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
35644+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
35645+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
35646+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
35647+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
35648+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
35649+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
35650+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35651+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35652+4 4 4 4 4 4
35653+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
35654+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
35655+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
35656+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
35657+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
35658+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
35659+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
35660+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
35661+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
35662+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
35663+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
35664+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35665+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35666+4 4 4 4 4 4
35667+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
35668+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
35669+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
35670+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
35671+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
35672+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
35673+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
35674+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
35675+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
35676+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
35677+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35678+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35679+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35680+4 4 4 4 4 4
35681+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
35682+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
35683+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
35684+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
35685+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
35686+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
35687+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
35688+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
35689+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
35690+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
35691+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35692+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35693+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35694+4 4 4 4 4 4
35695+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
35696+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
35697+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
35698+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
35699+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
35700+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
35701+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
35702+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
35703+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
35704+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
35705+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35706+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35707+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35708+4 4 4 4 4 4
35709+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
35710+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
35711+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
35712+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
35713+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
35714+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
35715+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
35716+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
35717+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
35718+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35719+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35720+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35721+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35722+4 4 4 4 4 4
35723+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
35724+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
35725+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
35726+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
35727+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
35728+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
35729+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
35730+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
35731+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
35732+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35733+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35734+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35735+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35736+4 4 4 4 4 4
35737+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
35738+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
35739+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
35740+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
35741+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
35742+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
35743+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
35744+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
35745+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
35746+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35747+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35748+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35749+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35750+4 4 4 4 4 4
35751+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
35752+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
35753+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
35754+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
35755+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
35756+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
35757+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
35758+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
35759+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
35760+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35761+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35762+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35763+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35764+4 4 4 4 4 4
35765+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
35766+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
35767+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
35768+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
35769+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
35770+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
35771+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
35772+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
35773+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
35774+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35775+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35776+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35777+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35778+4 4 4 4 4 4
35779+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
35780+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
35781+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
35782+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
35783+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
35784+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
35785+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
35786+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
35787+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
35788+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35789+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35790+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35791+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35792+4 4 4 4 4 4
35793+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
35794+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
35795+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
35796+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
35797+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
35798+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
35799+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
35800+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
35801+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
35802+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35803+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35804+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35805+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35806+4 4 4 4 4 4
35807+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
35808+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
35809+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
35810+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
35811+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
35812+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
35813+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
35814+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
35815+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
35816+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35817+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35818+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35819+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35820+4 4 4 4 4 4
35821+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
35822+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
35823+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
35824+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
35825+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
35826+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
35827+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
35828+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
35829+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
35830+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35831+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35832+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35833+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35834+4 4 4 4 4 4
35835+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
35836+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
35837+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
35838+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
35839+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
35840+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
35841+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
35842+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
35843+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
35844+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35845+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35846+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35847+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35848+4 4 4 4 4 4
35849+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
35850+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
35851+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
35852+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
35853+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
35854+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
35855+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
35856+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
35857+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
35858+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35859+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35860+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35861+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35862+4 4 4 4 4 4
35863+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
35864+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
35865+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
35866+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
35867+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
35868+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
35869+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
35870+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
35871+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
35872+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35873+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35874+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35875+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35876+4 4 4 4 4 4
35877+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
35878+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
35879+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
35880+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
35881+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
35882+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
35883+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
35884+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
35885+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
35886+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35887+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35888+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35889+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35890+4 4 4 4 4 4
35891+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
35892+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
35893+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
35894+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
35895+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
35896+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
35897+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
35898+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
35899+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
35900+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35901+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35902+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35903+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35904+4 4 4 4 4 4
35905+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
35906+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
35907+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
35908+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
35909+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
35910+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
35911+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
35912+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
35913+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
35914+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
35915+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35916+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35917+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35918+4 4 4 4 4 4
35919+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
35920+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
35921+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
35922+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
35923+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
35924+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
35925+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
35926+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
35927+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
35928+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
35929+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
35930+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35931+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35932+4 4 4 4 4 4
35933+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
35934+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
35935+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
35936+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
35937+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
35938+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
35939+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
35940+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
35941+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
35942+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
35943+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35944+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35945+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35946+4 4 4 4 4 4
35947+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
35948+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
35949+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
35950+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
35951+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
35952+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
35953+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35954+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
35955+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
35956+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
35957+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
35958+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35959+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35960+4 4 4 4 4 4
35961+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
35962+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
35963+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
35964+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
35965+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
35966+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
35967+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
35968+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
35969+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
35970+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
35971+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35972+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35973+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35974+4 4 4 4 4 4
35975+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
35976+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
35977+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
35978+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
35979+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
35980+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
35981+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
35982+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
35983+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
35984+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
35985+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35986+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35987+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35988+4 4 4 4 4 4
35989+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
35990+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
35991+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
35992+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
35993+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
35994+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
35995+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
35996+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
35997+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
35998+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
35999+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36000+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36001+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36002+4 4 4 4 4 4
36003+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
36004+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
36005+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
36006+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
36007+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
36008+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
36009+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
36010+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
36011+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
36012+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
36013+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36014+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36015+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36016+4 4 4 4 4 4
36017+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
36018+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
36019+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
36020+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
36021+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
36022+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
36023+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
36024+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
36025+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
36026+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
36027+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36028+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36029+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36030+4 4 4 4 4 4
36031+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
36032+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
36033+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
36034+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
36035+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
36036+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
36037+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
36038+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
36039+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
36040+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36041+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36042+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36043+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36044+4 4 4 4 4 4
36045+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
36046+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
36047+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
36048+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
36049+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
36050+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
36051+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
36052+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
36053+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
36054+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36055+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36056+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36057+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36058+4 4 4 4 4 4
36059+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
36060+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
36061+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
36062+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
36063+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
36064+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
36065+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
36066+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
36067+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36068+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36069+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36070+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36071+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36072+4 4 4 4 4 4
36073+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
36074+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
36075+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
36076+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
36077+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
36078+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
36079+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
36080+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
36081+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36082+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36083+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36084+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36085+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36086+4 4 4 4 4 4
36087+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
36088+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
36089+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
36090+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
36091+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
36092+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
36093+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
36094+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
36095+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36096+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36097+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36098+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36099+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36100+4 4 4 4 4 4
36101+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
36102+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
36103+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
36104+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
36105+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
36106+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
36107+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
36108+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
36109+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36110+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36111+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36112+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36113+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36114+4 4 4 4 4 4
36115+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36116+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
36117+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
36118+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
36119+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
36120+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
36121+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
36122+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
36123+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36124+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36125+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36126+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36127+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36128+4 4 4 4 4 4
36129+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36130+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
36131+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
36132+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
36133+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
36134+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
36135+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
36136+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
36137+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36138+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36139+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36140+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36141+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36142+4 4 4 4 4 4
36143+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36144+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36145+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
36146+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
36147+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
36148+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
36149+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
36150+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
36151+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36152+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36153+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36154+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36155+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36156+4 4 4 4 4 4
36157+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36158+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36159+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
36160+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
36161+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
36162+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
36163+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
36164+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36165+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36166+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36167+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36168+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36169+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36170+4 4 4 4 4 4
36171+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36172+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36173+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36174+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
36175+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
36176+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
36177+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
36178+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36179+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36180+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36181+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36182+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36183+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36184+4 4 4 4 4 4
36185+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36186+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36187+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36188+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
36189+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
36190+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
36191+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
36192+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36193+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36194+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36195+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36196+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36197+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36198+4 4 4 4 4 4
36199+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36200+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36201+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36202+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
36203+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
36204+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
36205+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
36206+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36207+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36208+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36209+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36210+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36211+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36212+4 4 4 4 4 4
36213+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36214+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36215+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36216+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
36217+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
36218+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
36219+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36220+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36221+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36222+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36223+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36224+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36225+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36226+4 4 4 4 4 4
36227+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36228+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36229+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36230+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36231+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
36232+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
36233+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
36234+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36235+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36236+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36237+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36238+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36239+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36240+4 4 4 4 4 4
36241+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36242+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36243+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36244+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36245+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
36246+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
36247+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36248+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36249+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36250+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36251+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36252+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36253+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36254+4 4 4 4 4 4
36255+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36256+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36257+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36258+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36259+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
36260+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
36261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36262+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36263+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36264+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36265+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36266+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36267+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36268+4 4 4 4 4 4
36269+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36270+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36271+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36272+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36273+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
36274+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
36275+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36276+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36277+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36278+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36279+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36280+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36281+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36282+4 4 4 4 4 4
36283diff -urNp linux-3.0.4/drivers/video/udlfb.c linux-3.0.4/drivers/video/udlfb.c
36284--- linux-3.0.4/drivers/video/udlfb.c 2011-07-21 22:17:23.000000000 -0400
36285+++ linux-3.0.4/drivers/video/udlfb.c 2011-08-23 21:47:56.000000000 -0400
36286@@ -586,11 +586,11 @@ int dlfb_handle_damage(struct dlfb_data
36287 dlfb_urb_completion(urb);
36288
36289 error:
36290- atomic_add(bytes_sent, &dev->bytes_sent);
36291- atomic_add(bytes_identical, &dev->bytes_identical);
36292- atomic_add(width*height*2, &dev->bytes_rendered);
36293+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
36294+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
36295+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
36296 end_cycles = get_cycles();
36297- atomic_add(((unsigned int) ((end_cycles - start_cycles)
36298+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
36299 >> 10)), /* Kcycles */
36300 &dev->cpu_kcycles_used);
36301
36302@@ -711,11 +711,11 @@ static void dlfb_dpy_deferred_io(struct
36303 dlfb_urb_completion(urb);
36304
36305 error:
36306- atomic_add(bytes_sent, &dev->bytes_sent);
36307- atomic_add(bytes_identical, &dev->bytes_identical);
36308- atomic_add(bytes_rendered, &dev->bytes_rendered);
36309+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
36310+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
36311+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
36312 end_cycles = get_cycles();
36313- atomic_add(((unsigned int) ((end_cycles - start_cycles)
36314+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
36315 >> 10)), /* Kcycles */
36316 &dev->cpu_kcycles_used);
36317 }
36318@@ -1307,7 +1307,7 @@ static ssize_t metrics_bytes_rendered_sh
36319 struct fb_info *fb_info = dev_get_drvdata(fbdev);
36320 struct dlfb_data *dev = fb_info->par;
36321 return snprintf(buf, PAGE_SIZE, "%u\n",
36322- atomic_read(&dev->bytes_rendered));
36323+ atomic_read_unchecked(&dev->bytes_rendered));
36324 }
36325
36326 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
36327@@ -1315,7 +1315,7 @@ static ssize_t metrics_bytes_identical_s
36328 struct fb_info *fb_info = dev_get_drvdata(fbdev);
36329 struct dlfb_data *dev = fb_info->par;
36330 return snprintf(buf, PAGE_SIZE, "%u\n",
36331- atomic_read(&dev->bytes_identical));
36332+ atomic_read_unchecked(&dev->bytes_identical));
36333 }
36334
36335 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
36336@@ -1323,7 +1323,7 @@ static ssize_t metrics_bytes_sent_show(s
36337 struct fb_info *fb_info = dev_get_drvdata(fbdev);
36338 struct dlfb_data *dev = fb_info->par;
36339 return snprintf(buf, PAGE_SIZE, "%u\n",
36340- atomic_read(&dev->bytes_sent));
36341+ atomic_read_unchecked(&dev->bytes_sent));
36342 }
36343
36344 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
36345@@ -1331,7 +1331,7 @@ static ssize_t metrics_cpu_kcycles_used_
36346 struct fb_info *fb_info = dev_get_drvdata(fbdev);
36347 struct dlfb_data *dev = fb_info->par;
36348 return snprintf(buf, PAGE_SIZE, "%u\n",
36349- atomic_read(&dev->cpu_kcycles_used));
36350+ atomic_read_unchecked(&dev->cpu_kcycles_used));
36351 }
36352
36353 static ssize_t edid_show(
36354@@ -1388,10 +1388,10 @@ static ssize_t metrics_reset_store(struc
36355 struct fb_info *fb_info = dev_get_drvdata(fbdev);
36356 struct dlfb_data *dev = fb_info->par;
36357
36358- atomic_set(&dev->bytes_rendered, 0);
36359- atomic_set(&dev->bytes_identical, 0);
36360- atomic_set(&dev->bytes_sent, 0);
36361- atomic_set(&dev->cpu_kcycles_used, 0);
36362+ atomic_set_unchecked(&dev->bytes_rendered, 0);
36363+ atomic_set_unchecked(&dev->bytes_identical, 0);
36364+ atomic_set_unchecked(&dev->bytes_sent, 0);
36365+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
36366
36367 return count;
36368 }
36369diff -urNp linux-3.0.4/drivers/video/uvesafb.c linux-3.0.4/drivers/video/uvesafb.c
36370--- linux-3.0.4/drivers/video/uvesafb.c 2011-07-21 22:17:23.000000000 -0400
36371+++ linux-3.0.4/drivers/video/uvesafb.c 2011-08-23 21:47:56.000000000 -0400
36372@@ -19,6 +19,7 @@
36373 #include <linux/io.h>
36374 #include <linux/mutex.h>
36375 #include <linux/slab.h>
36376+#include <linux/moduleloader.h>
36377 #include <video/edid.h>
36378 #include <video/uvesafb.h>
36379 #ifdef CONFIG_X86
36380@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
36381 NULL,
36382 };
36383
36384- return call_usermodehelper(v86d_path, argv, envp, 1);
36385+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
36386 }
36387
36388 /*
36389@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
36390 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
36391 par->pmi_setpal = par->ypan = 0;
36392 } else {
36393+
36394+#ifdef CONFIG_PAX_KERNEXEC
36395+#ifdef CONFIG_MODULES
36396+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
36397+#endif
36398+ if (!par->pmi_code) {
36399+ par->pmi_setpal = par->ypan = 0;
36400+ return 0;
36401+ }
36402+#endif
36403+
36404 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
36405 + task->t.regs.edi);
36406+
36407+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36408+ pax_open_kernel();
36409+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
36410+ pax_close_kernel();
36411+
36412+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
36413+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
36414+#else
36415 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
36416 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
36417+#endif
36418+
36419 printk(KERN_INFO "uvesafb: protected mode interface info at "
36420 "%04x:%04x\n",
36421 (u16)task->t.regs.es, (u16)task->t.regs.edi);
36422@@ -1821,6 +1844,11 @@ out:
36423 if (par->vbe_modes)
36424 kfree(par->vbe_modes);
36425
36426+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36427+ if (par->pmi_code)
36428+ module_free_exec(NULL, par->pmi_code);
36429+#endif
36430+
36431 framebuffer_release(info);
36432 return err;
36433 }
36434@@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platfor
36435 kfree(par->vbe_state_orig);
36436 if (par->vbe_state_saved)
36437 kfree(par->vbe_state_saved);
36438+
36439+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36440+ if (par->pmi_code)
36441+ module_free_exec(NULL, par->pmi_code);
36442+#endif
36443+
36444 }
36445
36446 framebuffer_release(info);
36447diff -urNp linux-3.0.4/drivers/video/vesafb.c linux-3.0.4/drivers/video/vesafb.c
36448--- linux-3.0.4/drivers/video/vesafb.c 2011-07-21 22:17:23.000000000 -0400
36449+++ linux-3.0.4/drivers/video/vesafb.c 2011-08-23 21:47:56.000000000 -0400
36450@@ -9,6 +9,7 @@
36451 */
36452
36453 #include <linux/module.h>
36454+#include <linux/moduleloader.h>
36455 #include <linux/kernel.h>
36456 #include <linux/errno.h>
36457 #include <linux/string.h>
36458@@ -52,8 +53,8 @@ static int vram_remap __initdata; /*
36459 static int vram_total __initdata; /* Set total amount of memory */
36460 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
36461 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
36462-static void (*pmi_start)(void) __read_mostly;
36463-static void (*pmi_pal) (void) __read_mostly;
36464+static void (*pmi_start)(void) __read_only;
36465+static void (*pmi_pal) (void) __read_only;
36466 static int depth __read_mostly;
36467 static int vga_compat __read_mostly;
36468 /* --------------------------------------------------------------------- */
36469@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
36470 unsigned int size_vmode;
36471 unsigned int size_remap;
36472 unsigned int size_total;
36473+ void *pmi_code = NULL;
36474
36475 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
36476 return -ENODEV;
36477@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
36478 size_remap = size_total;
36479 vesafb_fix.smem_len = size_remap;
36480
36481-#ifndef __i386__
36482- screen_info.vesapm_seg = 0;
36483-#endif
36484-
36485 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
36486 printk(KERN_WARNING
36487 "vesafb: cannot reserve video memory at 0x%lx\n",
36488@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct pl
36489 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
36490 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
36491
36492+#ifdef __i386__
36493+
36494+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36495+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
36496+ if (!pmi_code)
36497+#elif !defined(CONFIG_PAX_KERNEXEC)
36498+ if (0)
36499+#endif
36500+
36501+#endif
36502+ screen_info.vesapm_seg = 0;
36503+
36504 if (screen_info.vesapm_seg) {
36505- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
36506- screen_info.vesapm_seg,screen_info.vesapm_off);
36507+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
36508+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
36509 }
36510
36511 if (screen_info.vesapm_seg < 0xc000)
36512@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct pl
36513
36514 if (ypan || pmi_setpal) {
36515 unsigned short *pmi_base;
36516+
36517 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
36518- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
36519- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
36520+
36521+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36522+ pax_open_kernel();
36523+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
36524+#else
36525+ pmi_code = pmi_base;
36526+#endif
36527+
36528+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
36529+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
36530+
36531+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36532+ pmi_start = ktva_ktla(pmi_start);
36533+ pmi_pal = ktva_ktla(pmi_pal);
36534+ pax_close_kernel();
36535+#endif
36536+
36537 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
36538 if (pmi_base[3]) {
36539 printk(KERN_INFO "vesafb: pmi: ports = ");
36540@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct pl
36541 info->node, info->fix.id);
36542 return 0;
36543 err:
36544+
36545+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36546+ module_free_exec(NULL, pmi_code);
36547+#endif
36548+
36549 if (info->screen_base)
36550 iounmap(info->screen_base);
36551 framebuffer_release(info);
36552diff -urNp linux-3.0.4/drivers/video/via/via_clock.h linux-3.0.4/drivers/video/via/via_clock.h
36553--- linux-3.0.4/drivers/video/via/via_clock.h 2011-07-21 22:17:23.000000000 -0400
36554+++ linux-3.0.4/drivers/video/via/via_clock.h 2011-08-23 21:47:56.000000000 -0400
36555@@ -56,7 +56,7 @@ struct via_clock {
36556
36557 void (*set_engine_pll_state)(u8 state);
36558 void (*set_engine_pll)(struct via_pll_config config);
36559-};
36560+} __no_const;
36561
36562
36563 static inline u32 get_pll_internal_frequency(u32 ref_freq,
36564diff -urNp linux-3.0.4/drivers/virtio/virtio_balloon.c linux-3.0.4/drivers/virtio/virtio_balloon.c
36565--- linux-3.0.4/drivers/virtio/virtio_balloon.c 2011-07-21 22:17:23.000000000 -0400
36566+++ linux-3.0.4/drivers/virtio/virtio_balloon.c 2011-08-23 21:48:14.000000000 -0400
36567@@ -174,6 +174,8 @@ static void update_balloon_stats(struct
36568 struct sysinfo i;
36569 int idx = 0;
36570
36571+ pax_track_stack();
36572+
36573 all_vm_events(events);
36574 si_meminfo(&i);
36575
36576diff -urNp linux-3.0.4/fs/9p/vfs_inode.c linux-3.0.4/fs/9p/vfs_inode.c
36577--- linux-3.0.4/fs/9p/vfs_inode.c 2011-07-21 22:17:23.000000000 -0400
36578+++ linux-3.0.4/fs/9p/vfs_inode.c 2011-08-23 21:47:56.000000000 -0400
36579@@ -1210,7 +1210,7 @@ static void *v9fs_vfs_follow_link(struct
36580 void
36581 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
36582 {
36583- char *s = nd_get_link(nd);
36584+ const char *s = nd_get_link(nd);
36585
36586 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
36587 IS_ERR(s) ? "<error>" : s);
36588diff -urNp linux-3.0.4/fs/aio.c linux-3.0.4/fs/aio.c
36589--- linux-3.0.4/fs/aio.c 2011-07-21 22:17:23.000000000 -0400
36590+++ linux-3.0.4/fs/aio.c 2011-08-23 21:48:14.000000000 -0400
36591@@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx
36592 size += sizeof(struct io_event) * nr_events;
36593 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
36594
36595- if (nr_pages < 0)
36596+ if (nr_pages <= 0)
36597 return -EINVAL;
36598
36599 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
36600@@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ct
36601 struct aio_timeout to;
36602 int retry = 0;
36603
36604+ pax_track_stack();
36605+
36606 /* needed to zero any padding within an entry (there shouldn't be
36607 * any, but C is fun!
36608 */
36609@@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *i
36610 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
36611 {
36612 ssize_t ret;
36613+ struct iovec iovstack;
36614
36615 #ifdef CONFIG_COMPAT
36616 if (compat)
36617 ret = compat_rw_copy_check_uvector(type,
36618 (struct compat_iovec __user *)kiocb->ki_buf,
36619- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
36620+ kiocb->ki_nbytes, 1, &iovstack,
36621 &kiocb->ki_iovec);
36622 else
36623 #endif
36624 ret = rw_copy_check_uvector(type,
36625 (struct iovec __user *)kiocb->ki_buf,
36626- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
36627+ kiocb->ki_nbytes, 1, &iovstack,
36628 &kiocb->ki_iovec);
36629 if (ret < 0)
36630 goto out;
36631
36632+ if (kiocb->ki_iovec == &iovstack) {
36633+ kiocb->ki_inline_vec = iovstack;
36634+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
36635+ }
36636 kiocb->ki_nr_segs = kiocb->ki_nbytes;
36637 kiocb->ki_cur_seg = 0;
36638 /* ki_nbytes/left now reflect bytes instead of segs */
36639diff -urNp linux-3.0.4/fs/attr.c linux-3.0.4/fs/attr.c
36640--- linux-3.0.4/fs/attr.c 2011-07-21 22:17:23.000000000 -0400
36641+++ linux-3.0.4/fs/attr.c 2011-08-23 21:48:14.000000000 -0400
36642@@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode
36643 unsigned long limit;
36644
36645 limit = rlimit(RLIMIT_FSIZE);
36646+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
36647 if (limit != RLIM_INFINITY && offset > limit)
36648 goto out_sig;
36649 if (offset > inode->i_sb->s_maxbytes)
36650diff -urNp linux-3.0.4/fs/befs/linuxvfs.c linux-3.0.4/fs/befs/linuxvfs.c
36651--- linux-3.0.4/fs/befs/linuxvfs.c 2011-09-02 18:11:26.000000000 -0400
36652+++ linux-3.0.4/fs/befs/linuxvfs.c 2011-08-29 23:26:27.000000000 -0400
36653@@ -503,7 +503,7 @@ static void befs_put_link(struct dentry
36654 {
36655 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
36656 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
36657- char *link = nd_get_link(nd);
36658+ const char *link = nd_get_link(nd);
36659 if (!IS_ERR(link))
36660 kfree(link);
36661 }
36662diff -urNp linux-3.0.4/fs/binfmt_aout.c linux-3.0.4/fs/binfmt_aout.c
36663--- linux-3.0.4/fs/binfmt_aout.c 2011-07-21 22:17:23.000000000 -0400
36664+++ linux-3.0.4/fs/binfmt_aout.c 2011-08-23 21:48:14.000000000 -0400
36665@@ -16,6 +16,7 @@
36666 #include <linux/string.h>
36667 #include <linux/fs.h>
36668 #include <linux/file.h>
36669+#include <linux/security.h>
36670 #include <linux/stat.h>
36671 #include <linux/fcntl.h>
36672 #include <linux/ptrace.h>
36673@@ -86,6 +87,8 @@ static int aout_core_dump(struct coredum
36674 #endif
36675 # define START_STACK(u) ((void __user *)u.start_stack)
36676
36677+ memset(&dump, 0, sizeof(dump));
36678+
36679 fs = get_fs();
36680 set_fs(KERNEL_DS);
36681 has_dumped = 1;
36682@@ -97,10 +100,12 @@ static int aout_core_dump(struct coredum
36683
36684 /* If the size of the dump file exceeds the rlimit, then see what would happen
36685 if we wrote the stack, but not the data area. */
36686+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
36687 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
36688 dump.u_dsize = 0;
36689
36690 /* Make sure we have enough room to write the stack and data areas. */
36691+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
36692 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
36693 dump.u_ssize = 0;
36694
36695@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux
36696 rlim = rlimit(RLIMIT_DATA);
36697 if (rlim >= RLIM_INFINITY)
36698 rlim = ~0;
36699+
36700+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
36701 if (ex.a_data + ex.a_bss > rlim)
36702 return -ENOMEM;
36703
36704@@ -262,6 +269,27 @@ static int load_aout_binary(struct linux
36705 install_exec_creds(bprm);
36706 current->flags &= ~PF_FORKNOEXEC;
36707
36708+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
36709+ current->mm->pax_flags = 0UL;
36710+#endif
36711+
36712+#ifdef CONFIG_PAX_PAGEEXEC
36713+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
36714+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
36715+
36716+#ifdef CONFIG_PAX_EMUTRAMP
36717+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
36718+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
36719+#endif
36720+
36721+#ifdef CONFIG_PAX_MPROTECT
36722+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
36723+ current->mm->pax_flags |= MF_PAX_MPROTECT;
36724+#endif
36725+
36726+ }
36727+#endif
36728+
36729 if (N_MAGIC(ex) == OMAGIC) {
36730 unsigned long text_addr, map_size;
36731 loff_t pos;
36732@@ -334,7 +362,7 @@ static int load_aout_binary(struct linux
36733
36734 down_write(&current->mm->mmap_sem);
36735 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
36736- PROT_READ | PROT_WRITE | PROT_EXEC,
36737+ PROT_READ | PROT_WRITE,
36738 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
36739 fd_offset + ex.a_text);
36740 up_write(&current->mm->mmap_sem);
36741diff -urNp linux-3.0.4/fs/binfmt_elf.c linux-3.0.4/fs/binfmt_elf.c
36742--- linux-3.0.4/fs/binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
36743+++ linux-3.0.4/fs/binfmt_elf.c 2011-08-23 21:48:14.000000000 -0400
36744@@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump
36745 #define elf_core_dump NULL
36746 #endif
36747
36748+#ifdef CONFIG_PAX_MPROTECT
36749+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
36750+#endif
36751+
36752 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
36753 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
36754 #else
36755@@ -70,6 +74,11 @@ static struct linux_binfmt elf_format =
36756 .load_binary = load_elf_binary,
36757 .load_shlib = load_elf_library,
36758 .core_dump = elf_core_dump,
36759+
36760+#ifdef CONFIG_PAX_MPROTECT
36761+ .handle_mprotect= elf_handle_mprotect,
36762+#endif
36763+
36764 .min_coredump = ELF_EXEC_PAGESIZE,
36765 };
36766
36767@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
36768
36769 static int set_brk(unsigned long start, unsigned long end)
36770 {
36771+ unsigned long e = end;
36772+
36773 start = ELF_PAGEALIGN(start);
36774 end = ELF_PAGEALIGN(end);
36775 if (end > start) {
36776@@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
36777 if (BAD_ADDR(addr))
36778 return addr;
36779 }
36780- current->mm->start_brk = current->mm->brk = end;
36781+ current->mm->start_brk = current->mm->brk = e;
36782 return 0;
36783 }
36784
36785@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
36786 elf_addr_t __user *u_rand_bytes;
36787 const char *k_platform = ELF_PLATFORM;
36788 const char *k_base_platform = ELF_BASE_PLATFORM;
36789- unsigned char k_rand_bytes[16];
36790+ u32 k_rand_bytes[4];
36791 int items;
36792 elf_addr_t *elf_info;
36793 int ei_index = 0;
36794 const struct cred *cred = current_cred();
36795 struct vm_area_struct *vma;
36796+ unsigned long saved_auxv[AT_VECTOR_SIZE];
36797+
36798+ pax_track_stack();
36799
36800 /*
36801 * In some cases (e.g. Hyper-Threading), we want to avoid L1
36802@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
36803 * Generate 16 random bytes for userspace PRNG seeding.
36804 */
36805 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
36806- u_rand_bytes = (elf_addr_t __user *)
36807- STACK_ALLOC(p, sizeof(k_rand_bytes));
36808+ srandom32(k_rand_bytes[0] ^ random32());
36809+ srandom32(k_rand_bytes[1] ^ random32());
36810+ srandom32(k_rand_bytes[2] ^ random32());
36811+ srandom32(k_rand_bytes[3] ^ random32());
36812+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
36813+ u_rand_bytes = (elf_addr_t __user *) p;
36814 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
36815 return -EFAULT;
36816
36817@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
36818 return -EFAULT;
36819 current->mm->env_end = p;
36820
36821+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
36822+
36823 /* Put the elf_info on the stack in the right place. */
36824 sp = (elf_addr_t __user *)envp + 1;
36825- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
36826+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
36827 return -EFAULT;
36828 return 0;
36829 }
36830@@ -381,10 +401,10 @@ static unsigned long load_elf_interp(str
36831 {
36832 struct elf_phdr *elf_phdata;
36833 struct elf_phdr *eppnt;
36834- unsigned long load_addr = 0;
36835+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
36836 int load_addr_set = 0;
36837 unsigned long last_bss = 0, elf_bss = 0;
36838- unsigned long error = ~0UL;
36839+ unsigned long error = -EINVAL;
36840 unsigned long total_size;
36841 int retval, i, size;
36842
36843@@ -430,6 +450,11 @@ static unsigned long load_elf_interp(str
36844 goto out_close;
36845 }
36846
36847+#ifdef CONFIG_PAX_SEGMEXEC
36848+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
36849+ pax_task_size = SEGMEXEC_TASK_SIZE;
36850+#endif
36851+
36852 eppnt = elf_phdata;
36853 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
36854 if (eppnt->p_type == PT_LOAD) {
36855@@ -473,8 +498,8 @@ static unsigned long load_elf_interp(str
36856 k = load_addr + eppnt->p_vaddr;
36857 if (BAD_ADDR(k) ||
36858 eppnt->p_filesz > eppnt->p_memsz ||
36859- eppnt->p_memsz > TASK_SIZE ||
36860- TASK_SIZE - eppnt->p_memsz < k) {
36861+ eppnt->p_memsz > pax_task_size ||
36862+ pax_task_size - eppnt->p_memsz < k) {
36863 error = -ENOMEM;
36864 goto out_close;
36865 }
36866@@ -528,6 +553,193 @@ out:
36867 return error;
36868 }
36869
36870+#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
36871+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
36872+{
36873+ unsigned long pax_flags = 0UL;
36874+
36875+#ifdef CONFIG_PAX_PAGEEXEC
36876+ if (elf_phdata->p_flags & PF_PAGEEXEC)
36877+ pax_flags |= MF_PAX_PAGEEXEC;
36878+#endif
36879+
36880+#ifdef CONFIG_PAX_SEGMEXEC
36881+ if (elf_phdata->p_flags & PF_SEGMEXEC)
36882+ pax_flags |= MF_PAX_SEGMEXEC;
36883+#endif
36884+
36885+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36886+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36887+ if ((__supported_pte_mask & _PAGE_NX))
36888+ pax_flags &= ~MF_PAX_SEGMEXEC;
36889+ else
36890+ pax_flags &= ~MF_PAX_PAGEEXEC;
36891+ }
36892+#endif
36893+
36894+#ifdef CONFIG_PAX_EMUTRAMP
36895+ if (elf_phdata->p_flags & PF_EMUTRAMP)
36896+ pax_flags |= MF_PAX_EMUTRAMP;
36897+#endif
36898+
36899+#ifdef CONFIG_PAX_MPROTECT
36900+ if (elf_phdata->p_flags & PF_MPROTECT)
36901+ pax_flags |= MF_PAX_MPROTECT;
36902+#endif
36903+
36904+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
36905+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
36906+ pax_flags |= MF_PAX_RANDMMAP;
36907+#endif
36908+
36909+ return pax_flags;
36910+}
36911+#endif
36912+
36913+#ifdef CONFIG_PAX_PT_PAX_FLAGS
36914+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
36915+{
36916+ unsigned long pax_flags = 0UL;
36917+
36918+#ifdef CONFIG_PAX_PAGEEXEC
36919+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
36920+ pax_flags |= MF_PAX_PAGEEXEC;
36921+#endif
36922+
36923+#ifdef CONFIG_PAX_SEGMEXEC
36924+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
36925+ pax_flags |= MF_PAX_SEGMEXEC;
36926+#endif
36927+
36928+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36929+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36930+ if ((__supported_pte_mask & _PAGE_NX))
36931+ pax_flags &= ~MF_PAX_SEGMEXEC;
36932+ else
36933+ pax_flags &= ~MF_PAX_PAGEEXEC;
36934+ }
36935+#endif
36936+
36937+#ifdef CONFIG_PAX_EMUTRAMP
36938+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
36939+ pax_flags |= MF_PAX_EMUTRAMP;
36940+#endif
36941+
36942+#ifdef CONFIG_PAX_MPROTECT
36943+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
36944+ pax_flags |= MF_PAX_MPROTECT;
36945+#endif
36946+
36947+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
36948+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
36949+ pax_flags |= MF_PAX_RANDMMAP;
36950+#endif
36951+
36952+ return pax_flags;
36953+}
36954+#endif
36955+
36956+#ifdef CONFIG_PAX_EI_PAX
36957+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
36958+{
36959+ unsigned long pax_flags = 0UL;
36960+
36961+#ifdef CONFIG_PAX_PAGEEXEC
36962+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
36963+ pax_flags |= MF_PAX_PAGEEXEC;
36964+#endif
36965+
36966+#ifdef CONFIG_PAX_SEGMEXEC
36967+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
36968+ pax_flags |= MF_PAX_SEGMEXEC;
36969+#endif
36970+
36971+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36972+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36973+ if ((__supported_pte_mask & _PAGE_NX))
36974+ pax_flags &= ~MF_PAX_SEGMEXEC;
36975+ else
36976+ pax_flags &= ~MF_PAX_PAGEEXEC;
36977+ }
36978+#endif
36979+
36980+#ifdef CONFIG_PAX_EMUTRAMP
36981+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
36982+ pax_flags |= MF_PAX_EMUTRAMP;
36983+#endif
36984+
36985+#ifdef CONFIG_PAX_MPROTECT
36986+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
36987+ pax_flags |= MF_PAX_MPROTECT;
36988+#endif
36989+
36990+#ifdef CONFIG_PAX_ASLR
36991+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
36992+ pax_flags |= MF_PAX_RANDMMAP;
36993+#endif
36994+
36995+ return pax_flags;
36996+}
36997+#endif
36998+
36999+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
37000+static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
37001+{
37002+ unsigned long pax_flags = 0UL;
37003+
37004+#ifdef CONFIG_PAX_PT_PAX_FLAGS
37005+ unsigned long i;
37006+ int found_flags = 0;
37007+#endif
37008+
37009+#ifdef CONFIG_PAX_EI_PAX
37010+ pax_flags = pax_parse_ei_pax(elf_ex);
37011+#endif
37012+
37013+#ifdef CONFIG_PAX_PT_PAX_FLAGS
37014+ for (i = 0UL; i < elf_ex->e_phnum; i++)
37015+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
37016+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
37017+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
37018+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
37019+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
37020+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
37021+ return -EINVAL;
37022+
37023+#ifdef CONFIG_PAX_SOFTMODE
37024+ if (pax_softmode)
37025+ pax_flags = pax_parse_softmode(&elf_phdata[i]);
37026+ else
37027+#endif
37028+
37029+ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
37030+ found_flags = 1;
37031+ break;
37032+ }
37033+#endif
37034+
37035+#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
37036+ if (found_flags == 0) {
37037+ struct elf_phdr phdr;
37038+ memset(&phdr, 0, sizeof(phdr));
37039+ phdr.p_flags = PF_NOEMUTRAMP;
37040+#ifdef CONFIG_PAX_SOFTMODE
37041+ if (pax_softmode)
37042+ pax_flags = pax_parse_softmode(&phdr);
37043+ else
37044+#endif
37045+ pax_flags = pax_parse_hardmode(&phdr);
37046+ }
37047+#endif
37048+
37049+ if (0 > pax_check_flags(&pax_flags))
37050+ return -EINVAL;
37051+
37052+ current->mm->pax_flags = pax_flags;
37053+ return 0;
37054+}
37055+#endif
37056+
37057 /*
37058 * These are the functions used to load ELF style executables and shared
37059 * libraries. There is no binary dependent code anywhere else.
37060@@ -544,6 +756,11 @@ static unsigned long randomize_stack_top
37061 {
37062 unsigned int random_variable = 0;
37063
37064+#ifdef CONFIG_PAX_RANDUSTACK
37065+ if (randomize_va_space)
37066+ return stack_top - current->mm->delta_stack;
37067+#endif
37068+
37069 if ((current->flags & PF_RANDOMIZE) &&
37070 !(current->personality & ADDR_NO_RANDOMIZE)) {
37071 random_variable = get_random_int() & STACK_RND_MASK;
37072@@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_
37073 unsigned long load_addr = 0, load_bias = 0;
37074 int load_addr_set = 0;
37075 char * elf_interpreter = NULL;
37076- unsigned long error;
37077+ unsigned long error = 0;
37078 struct elf_phdr *elf_ppnt, *elf_phdata;
37079 unsigned long elf_bss, elf_brk;
37080 int retval, i;
37081@@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_
37082 unsigned long start_code, end_code, start_data, end_data;
37083 unsigned long reloc_func_desc __maybe_unused = 0;
37084 int executable_stack = EXSTACK_DEFAULT;
37085- unsigned long def_flags = 0;
37086 struct {
37087 struct elfhdr elf_ex;
37088 struct elfhdr interp_elf_ex;
37089 } *loc;
37090+ unsigned long pax_task_size = TASK_SIZE;
37091
37092 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
37093 if (!loc) {
37094@@ -714,11 +931,81 @@ static int load_elf_binary(struct linux_
37095
37096 /* OK, This is the point of no return */
37097 current->flags &= ~PF_FORKNOEXEC;
37098- current->mm->def_flags = def_flags;
37099+
37100+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
37101+ current->mm->pax_flags = 0UL;
37102+#endif
37103+
37104+#ifdef CONFIG_PAX_DLRESOLVE
37105+ current->mm->call_dl_resolve = 0UL;
37106+#endif
37107+
37108+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
37109+ current->mm->call_syscall = 0UL;
37110+#endif
37111+
37112+#ifdef CONFIG_PAX_ASLR
37113+ current->mm->delta_mmap = 0UL;
37114+ current->mm->delta_stack = 0UL;
37115+#endif
37116+
37117+ current->mm->def_flags = 0;
37118+
37119+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
37120+ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
37121+ send_sig(SIGKILL, current, 0);
37122+ goto out_free_dentry;
37123+ }
37124+#endif
37125+
37126+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
37127+ pax_set_initial_flags(bprm);
37128+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
37129+ if (pax_set_initial_flags_func)
37130+ (pax_set_initial_flags_func)(bprm);
37131+#endif
37132+
37133+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
37134+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
37135+ current->mm->context.user_cs_limit = PAGE_SIZE;
37136+ current->mm->def_flags |= VM_PAGEEXEC;
37137+ }
37138+#endif
37139+
37140+#ifdef CONFIG_PAX_SEGMEXEC
37141+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
37142+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
37143+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
37144+ pax_task_size = SEGMEXEC_TASK_SIZE;
37145+ current->mm->def_flags |= VM_NOHUGEPAGE;
37146+ }
37147+#endif
37148+
37149+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
37150+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
37151+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
37152+ put_cpu();
37153+ }
37154+#endif
37155
37156 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
37157 may depend on the personality. */
37158 SET_PERSONALITY(loc->elf_ex);
37159+
37160+#ifdef CONFIG_PAX_ASLR
37161+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
37162+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
37163+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
37164+ }
37165+#endif
37166+
37167+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
37168+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
37169+ executable_stack = EXSTACK_DISABLE_X;
37170+ current->personality &= ~READ_IMPLIES_EXEC;
37171+ } else
37172+#endif
37173+
37174 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
37175 current->personality |= READ_IMPLIES_EXEC;
37176
37177@@ -800,6 +1087,20 @@ static int load_elf_binary(struct linux_
37178 #else
37179 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
37180 #endif
37181+
37182+#ifdef CONFIG_PAX_RANDMMAP
37183+ /* PaX: randomize base address at the default exe base if requested */
37184+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
37185+#ifdef CONFIG_SPARC64
37186+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
37187+#else
37188+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
37189+#endif
37190+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
37191+ elf_flags |= MAP_FIXED;
37192+ }
37193+#endif
37194+
37195 }
37196
37197 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
37198@@ -832,9 +1133,9 @@ static int load_elf_binary(struct linux_
37199 * allowed task size. Note that p_filesz must always be
37200 * <= p_memsz so it is only necessary to check p_memsz.
37201 */
37202- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
37203- elf_ppnt->p_memsz > TASK_SIZE ||
37204- TASK_SIZE - elf_ppnt->p_memsz < k) {
37205+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
37206+ elf_ppnt->p_memsz > pax_task_size ||
37207+ pax_task_size - elf_ppnt->p_memsz < k) {
37208 /* set_brk can never work. Avoid overflows. */
37209 send_sig(SIGKILL, current, 0);
37210 retval = -EINVAL;
37211@@ -862,6 +1163,11 @@ static int load_elf_binary(struct linux_
37212 start_data += load_bias;
37213 end_data += load_bias;
37214
37215+#ifdef CONFIG_PAX_RANDMMAP
37216+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
37217+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
37218+#endif
37219+
37220 /* Calling set_brk effectively mmaps the pages that we need
37221 * for the bss and break sections. We must do this before
37222 * mapping in the interpreter, to make sure it doesn't wind
37223@@ -873,9 +1179,11 @@ static int load_elf_binary(struct linux_
37224 goto out_free_dentry;
37225 }
37226 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
37227- send_sig(SIGSEGV, current, 0);
37228- retval = -EFAULT; /* Nobody gets to see this, but.. */
37229- goto out_free_dentry;
37230+ /*
37231+ * This bss-zeroing can fail if the ELF
37232+ * file specifies odd protections. So
37233+ * we don't check the return value
37234+ */
37235 }
37236
37237 if (elf_interpreter) {
37238@@ -1090,7 +1398,7 @@ out:
37239 * Decide what to dump of a segment, part, all or none.
37240 */
37241 static unsigned long vma_dump_size(struct vm_area_struct *vma,
37242- unsigned long mm_flags)
37243+ unsigned long mm_flags, long signr)
37244 {
37245 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
37246
37247@@ -1124,7 +1432,7 @@ static unsigned long vma_dump_size(struc
37248 if (vma->vm_file == NULL)
37249 return 0;
37250
37251- if (FILTER(MAPPED_PRIVATE))
37252+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
37253 goto whole;
37254
37255 /*
37256@@ -1346,9 +1654,9 @@ static void fill_auxv_note(struct memelf
37257 {
37258 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
37259 int i = 0;
37260- do
37261+ do {
37262 i += 2;
37263- while (auxv[i - 2] != AT_NULL);
37264+ } while (auxv[i - 2] != AT_NULL);
37265 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
37266 }
37267
37268@@ -1854,14 +2162,14 @@ static void fill_extnum_info(struct elfh
37269 }
37270
37271 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
37272- unsigned long mm_flags)
37273+ struct coredump_params *cprm)
37274 {
37275 struct vm_area_struct *vma;
37276 size_t size = 0;
37277
37278 for (vma = first_vma(current, gate_vma); vma != NULL;
37279 vma = next_vma(vma, gate_vma))
37280- size += vma_dump_size(vma, mm_flags);
37281+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
37282 return size;
37283 }
37284
37285@@ -1955,7 +2263,7 @@ static int elf_core_dump(struct coredump
37286
37287 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
37288
37289- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
37290+ offset += elf_core_vma_data_size(gate_vma, cprm);
37291 offset += elf_core_extra_data_size();
37292 e_shoff = offset;
37293
37294@@ -1969,10 +2277,12 @@ static int elf_core_dump(struct coredump
37295 offset = dataoff;
37296
37297 size += sizeof(*elf);
37298+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
37299 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
37300 goto end_coredump;
37301
37302 size += sizeof(*phdr4note);
37303+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
37304 if (size > cprm->limit
37305 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
37306 goto end_coredump;
37307@@ -1986,7 +2296,7 @@ static int elf_core_dump(struct coredump
37308 phdr.p_offset = offset;
37309 phdr.p_vaddr = vma->vm_start;
37310 phdr.p_paddr = 0;
37311- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
37312+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
37313 phdr.p_memsz = vma->vm_end - vma->vm_start;
37314 offset += phdr.p_filesz;
37315 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
37316@@ -1997,6 +2307,7 @@ static int elf_core_dump(struct coredump
37317 phdr.p_align = ELF_EXEC_PAGESIZE;
37318
37319 size += sizeof(phdr);
37320+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
37321 if (size > cprm->limit
37322 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
37323 goto end_coredump;
37324@@ -2021,7 +2332,7 @@ static int elf_core_dump(struct coredump
37325 unsigned long addr;
37326 unsigned long end;
37327
37328- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
37329+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
37330
37331 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
37332 struct page *page;
37333@@ -2030,6 +2341,7 @@ static int elf_core_dump(struct coredump
37334 page = get_dump_page(addr);
37335 if (page) {
37336 void *kaddr = kmap(page);
37337+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
37338 stop = ((size += PAGE_SIZE) > cprm->limit) ||
37339 !dump_write(cprm->file, kaddr,
37340 PAGE_SIZE);
37341@@ -2047,6 +2359,7 @@ static int elf_core_dump(struct coredump
37342
37343 if (e_phnum == PN_XNUM) {
37344 size += sizeof(*shdr4extnum);
37345+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
37346 if (size > cprm->limit
37347 || !dump_write(cprm->file, shdr4extnum,
37348 sizeof(*shdr4extnum)))
37349@@ -2067,6 +2380,97 @@ out:
37350
37351 #endif /* CONFIG_ELF_CORE */
37352
37353+#ifdef CONFIG_PAX_MPROTECT
37354+/* PaX: non-PIC ELF libraries need relocations on their executable segments
37355+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
37356+ * we'll remove VM_MAYWRITE for good on RELRO segments.
37357+ *
37358+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
37359+ * basis because we want to allow the common case and not the special ones.
37360+ */
37361+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
37362+{
37363+ struct elfhdr elf_h;
37364+ struct elf_phdr elf_p;
37365+ unsigned long i;
37366+ unsigned long oldflags;
37367+ bool is_textrel_rw, is_textrel_rx, is_relro;
37368+
37369+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
37370+ return;
37371+
37372+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
37373+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
37374+
37375+#ifdef CONFIG_PAX_ELFRELOCS
37376+ /* possible TEXTREL */
37377+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
37378+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
37379+#else
37380+ is_textrel_rw = false;
37381+ is_textrel_rx = false;
37382+#endif
37383+
37384+ /* possible RELRO */
37385+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
37386+
37387+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
37388+ return;
37389+
37390+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
37391+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
37392+
37393+#ifdef CONFIG_PAX_ETEXECRELOCS
37394+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
37395+#else
37396+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
37397+#endif
37398+
37399+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
37400+ !elf_check_arch(&elf_h) ||
37401+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
37402+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
37403+ return;
37404+
37405+ for (i = 0UL; i < elf_h.e_phnum; i++) {
37406+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
37407+ return;
37408+ switch (elf_p.p_type) {
37409+ case PT_DYNAMIC:
37410+ if (!is_textrel_rw && !is_textrel_rx)
37411+ continue;
37412+ i = 0UL;
37413+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
37414+ elf_dyn dyn;
37415+
37416+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
37417+ return;
37418+ if (dyn.d_tag == DT_NULL)
37419+ return;
37420+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
37421+ gr_log_textrel(vma);
37422+ if (is_textrel_rw)
37423+ vma->vm_flags |= VM_MAYWRITE;
37424+ else
37425+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
37426+ vma->vm_flags &= ~VM_MAYWRITE;
37427+ return;
37428+ }
37429+ i++;
37430+ }
37431+ return;
37432+
37433+ case PT_GNU_RELRO:
37434+ if (!is_relro)
37435+ continue;
37436+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
37437+ vma->vm_flags &= ~VM_MAYWRITE;
37438+ return;
37439+ }
37440+ }
37441+}
37442+#endif
37443+
37444 static int __init init_elf_binfmt(void)
37445 {
37446 return register_binfmt(&elf_format);
37447diff -urNp linux-3.0.4/fs/binfmt_flat.c linux-3.0.4/fs/binfmt_flat.c
37448--- linux-3.0.4/fs/binfmt_flat.c 2011-07-21 22:17:23.000000000 -0400
37449+++ linux-3.0.4/fs/binfmt_flat.c 2011-08-23 21:47:56.000000000 -0400
37450@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_b
37451 realdatastart = (unsigned long) -ENOMEM;
37452 printk("Unable to allocate RAM for process data, errno %d\n",
37453 (int)-realdatastart);
37454+ down_write(&current->mm->mmap_sem);
37455 do_munmap(current->mm, textpos, text_len);
37456+ up_write(&current->mm->mmap_sem);
37457 ret = realdatastart;
37458 goto err;
37459 }
37460@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_b
37461 }
37462 if (IS_ERR_VALUE(result)) {
37463 printk("Unable to read data+bss, errno %d\n", (int)-result);
37464+ down_write(&current->mm->mmap_sem);
37465 do_munmap(current->mm, textpos, text_len);
37466 do_munmap(current->mm, realdatastart, len);
37467+ up_write(&current->mm->mmap_sem);
37468 ret = result;
37469 goto err;
37470 }
37471@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_b
37472 }
37473 if (IS_ERR_VALUE(result)) {
37474 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
37475+ down_write(&current->mm->mmap_sem);
37476 do_munmap(current->mm, textpos, text_len + data_len + extra +
37477 MAX_SHARED_LIBS * sizeof(unsigned long));
37478+ up_write(&current->mm->mmap_sem);
37479 ret = result;
37480 goto err;
37481 }
37482diff -urNp linux-3.0.4/fs/bio.c linux-3.0.4/fs/bio.c
37483--- linux-3.0.4/fs/bio.c 2011-07-21 22:17:23.000000000 -0400
37484+++ linux-3.0.4/fs/bio.c 2011-08-23 21:47:56.000000000 -0400
37485@@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct b
37486 const int read = bio_data_dir(bio) == READ;
37487 struct bio_map_data *bmd = bio->bi_private;
37488 int i;
37489- char *p = bmd->sgvecs[0].iov_base;
37490+ char *p = (__force char *)bmd->sgvecs[0].iov_base;
37491
37492 __bio_for_each_segment(bvec, bio, i, 0) {
37493 char *addr = page_address(bvec->bv_page);
37494diff -urNp linux-3.0.4/fs/block_dev.c linux-3.0.4/fs/block_dev.c
37495--- linux-3.0.4/fs/block_dev.c 2011-07-21 22:17:23.000000000 -0400
37496+++ linux-3.0.4/fs/block_dev.c 2011-08-23 21:47:56.000000000 -0400
37497@@ -671,7 +671,7 @@ static bool bd_may_claim(struct block_de
37498 else if (bdev->bd_contains == bdev)
37499 return true; /* is a whole device which isn't held */
37500
37501- else if (whole->bd_holder == bd_may_claim)
37502+ else if (whole->bd_holder == (void *)bd_may_claim)
37503 return true; /* is a partition of a device that is being partitioned */
37504 else if (whole->bd_holder != NULL)
37505 return false; /* is a partition of a held device */
37506diff -urNp linux-3.0.4/fs/btrfs/ctree.c linux-3.0.4/fs/btrfs/ctree.c
37507--- linux-3.0.4/fs/btrfs/ctree.c 2011-07-21 22:17:23.000000000 -0400
37508+++ linux-3.0.4/fs/btrfs/ctree.c 2011-08-23 21:47:56.000000000 -0400
37509@@ -454,9 +454,12 @@ static noinline int __btrfs_cow_block(st
37510 free_extent_buffer(buf);
37511 add_root_to_dirty_list(root);
37512 } else {
37513- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
37514- parent_start = parent->start;
37515- else
37516+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
37517+ if (parent)
37518+ parent_start = parent->start;
37519+ else
37520+ parent_start = 0;
37521+ } else
37522 parent_start = 0;
37523
37524 WARN_ON(trans->transid != btrfs_header_generation(parent));
37525diff -urNp linux-3.0.4/fs/btrfs/inode.c linux-3.0.4/fs/btrfs/inode.c
37526--- linux-3.0.4/fs/btrfs/inode.c 2011-07-21 22:17:23.000000000 -0400
37527+++ linux-3.0.4/fs/btrfs/inode.c 2011-08-23 21:48:14.000000000 -0400
37528@@ -6895,7 +6895,7 @@ fail:
37529 return -ENOMEM;
37530 }
37531
37532-static int btrfs_getattr(struct vfsmount *mnt,
37533+int btrfs_getattr(struct vfsmount *mnt,
37534 struct dentry *dentry, struct kstat *stat)
37535 {
37536 struct inode *inode = dentry->d_inode;
37537@@ -6907,6 +6907,14 @@ static int btrfs_getattr(struct vfsmount
37538 return 0;
37539 }
37540
37541+EXPORT_SYMBOL(btrfs_getattr);
37542+
37543+dev_t get_btrfs_dev_from_inode(struct inode *inode)
37544+{
37545+ return BTRFS_I(inode)->root->anon_super.s_dev;
37546+}
37547+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
37548+
37549 /*
37550 * If a file is moved, it will inherit the cow and compression flags of the new
37551 * directory.
37552diff -urNp linux-3.0.4/fs/btrfs/ioctl.c linux-3.0.4/fs/btrfs/ioctl.c
37553--- linux-3.0.4/fs/btrfs/ioctl.c 2011-07-21 22:17:23.000000000 -0400
37554+++ linux-3.0.4/fs/btrfs/ioctl.c 2011-08-23 21:48:14.000000000 -0400
37555@@ -2676,9 +2676,12 @@ long btrfs_ioctl_space_info(struct btrfs
37556 for (i = 0; i < num_types; i++) {
37557 struct btrfs_space_info *tmp;
37558
37559+ /* Don't copy in more than we allocated */
37560 if (!slot_count)
37561 break;
37562
37563+ slot_count--;
37564+
37565 info = NULL;
37566 rcu_read_lock();
37567 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
37568@@ -2700,10 +2703,7 @@ long btrfs_ioctl_space_info(struct btrfs
37569 memcpy(dest, &space, sizeof(space));
37570 dest++;
37571 space_args.total_spaces++;
37572- slot_count--;
37573 }
37574- if (!slot_count)
37575- break;
37576 }
37577 up_read(&info->groups_sem);
37578 }
37579diff -urNp linux-3.0.4/fs/btrfs/relocation.c linux-3.0.4/fs/btrfs/relocation.c
37580--- linux-3.0.4/fs/btrfs/relocation.c 2011-07-21 22:17:23.000000000 -0400
37581+++ linux-3.0.4/fs/btrfs/relocation.c 2011-08-23 21:47:56.000000000 -0400
37582@@ -1242,7 +1242,7 @@ static int __update_reloc_root(struct bt
37583 }
37584 spin_unlock(&rc->reloc_root_tree.lock);
37585
37586- BUG_ON((struct btrfs_root *)node->data != root);
37587+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
37588
37589 if (!del) {
37590 spin_lock(&rc->reloc_root_tree.lock);
37591diff -urNp linux-3.0.4/fs/cachefiles/bind.c linux-3.0.4/fs/cachefiles/bind.c
37592--- linux-3.0.4/fs/cachefiles/bind.c 2011-07-21 22:17:23.000000000 -0400
37593+++ linux-3.0.4/fs/cachefiles/bind.c 2011-08-23 21:47:56.000000000 -0400
37594@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
37595 args);
37596
37597 /* start by checking things over */
37598- ASSERT(cache->fstop_percent >= 0 &&
37599- cache->fstop_percent < cache->fcull_percent &&
37600+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
37601 cache->fcull_percent < cache->frun_percent &&
37602 cache->frun_percent < 100);
37603
37604- ASSERT(cache->bstop_percent >= 0 &&
37605- cache->bstop_percent < cache->bcull_percent &&
37606+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
37607 cache->bcull_percent < cache->brun_percent &&
37608 cache->brun_percent < 100);
37609
37610diff -urNp linux-3.0.4/fs/cachefiles/daemon.c linux-3.0.4/fs/cachefiles/daemon.c
37611--- linux-3.0.4/fs/cachefiles/daemon.c 2011-07-21 22:17:23.000000000 -0400
37612+++ linux-3.0.4/fs/cachefiles/daemon.c 2011-08-23 21:47:56.000000000 -0400
37613@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(st
37614 if (n > buflen)
37615 return -EMSGSIZE;
37616
37617- if (copy_to_user(_buffer, buffer, n) != 0)
37618+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
37619 return -EFAULT;
37620
37621 return n;
37622@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(s
37623 if (test_bit(CACHEFILES_DEAD, &cache->flags))
37624 return -EIO;
37625
37626- if (datalen < 0 || datalen > PAGE_SIZE - 1)
37627+ if (datalen > PAGE_SIZE - 1)
37628 return -EOPNOTSUPP;
37629
37630 /* drag the command string into the kernel so we can parse it */
37631@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struc
37632 if (args[0] != '%' || args[1] != '\0')
37633 return -EINVAL;
37634
37635- if (fstop < 0 || fstop >= cache->fcull_percent)
37636+ if (fstop >= cache->fcull_percent)
37637 return cachefiles_daemon_range_error(cache, args);
37638
37639 cache->fstop_percent = fstop;
37640@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struc
37641 if (args[0] != '%' || args[1] != '\0')
37642 return -EINVAL;
37643
37644- if (bstop < 0 || bstop >= cache->bcull_percent)
37645+ if (bstop >= cache->bcull_percent)
37646 return cachefiles_daemon_range_error(cache, args);
37647
37648 cache->bstop_percent = bstop;
37649diff -urNp linux-3.0.4/fs/cachefiles/internal.h linux-3.0.4/fs/cachefiles/internal.h
37650--- linux-3.0.4/fs/cachefiles/internal.h 2011-07-21 22:17:23.000000000 -0400
37651+++ linux-3.0.4/fs/cachefiles/internal.h 2011-08-23 21:47:56.000000000 -0400
37652@@ -57,7 +57,7 @@ struct cachefiles_cache {
37653 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
37654 struct rb_root active_nodes; /* active nodes (can't be culled) */
37655 rwlock_t active_lock; /* lock for active_nodes */
37656- atomic_t gravecounter; /* graveyard uniquifier */
37657+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
37658 unsigned frun_percent; /* when to stop culling (% files) */
37659 unsigned fcull_percent; /* when to start culling (% files) */
37660 unsigned fstop_percent; /* when to stop allocating (% files) */
37661@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struc
37662 * proc.c
37663 */
37664 #ifdef CONFIG_CACHEFILES_HISTOGRAM
37665-extern atomic_t cachefiles_lookup_histogram[HZ];
37666-extern atomic_t cachefiles_mkdir_histogram[HZ];
37667-extern atomic_t cachefiles_create_histogram[HZ];
37668+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
37669+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
37670+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
37671
37672 extern int __init cachefiles_proc_init(void);
37673 extern void cachefiles_proc_cleanup(void);
37674 static inline
37675-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
37676+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
37677 {
37678 unsigned long jif = jiffies - start_jif;
37679 if (jif >= HZ)
37680 jif = HZ - 1;
37681- atomic_inc(&histogram[jif]);
37682+ atomic_inc_unchecked(&histogram[jif]);
37683 }
37684
37685 #else
37686diff -urNp linux-3.0.4/fs/cachefiles/namei.c linux-3.0.4/fs/cachefiles/namei.c
37687--- linux-3.0.4/fs/cachefiles/namei.c 2011-07-21 22:17:23.000000000 -0400
37688+++ linux-3.0.4/fs/cachefiles/namei.c 2011-08-23 21:47:56.000000000 -0400
37689@@ -318,7 +318,7 @@ try_again:
37690 /* first step is to make up a grave dentry in the graveyard */
37691 sprintf(nbuffer, "%08x%08x",
37692 (uint32_t) get_seconds(),
37693- (uint32_t) atomic_inc_return(&cache->gravecounter));
37694+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
37695
37696 /* do the multiway lock magic */
37697 trap = lock_rename(cache->graveyard, dir);
37698diff -urNp linux-3.0.4/fs/cachefiles/proc.c linux-3.0.4/fs/cachefiles/proc.c
37699--- linux-3.0.4/fs/cachefiles/proc.c 2011-07-21 22:17:23.000000000 -0400
37700+++ linux-3.0.4/fs/cachefiles/proc.c 2011-08-23 21:47:56.000000000 -0400
37701@@ -14,9 +14,9 @@
37702 #include <linux/seq_file.h>
37703 #include "internal.h"
37704
37705-atomic_t cachefiles_lookup_histogram[HZ];
37706-atomic_t cachefiles_mkdir_histogram[HZ];
37707-atomic_t cachefiles_create_histogram[HZ];
37708+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
37709+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
37710+atomic_unchecked_t cachefiles_create_histogram[HZ];
37711
37712 /*
37713 * display the latency histogram
37714@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
37715 return 0;
37716 default:
37717 index = (unsigned long) v - 3;
37718- x = atomic_read(&cachefiles_lookup_histogram[index]);
37719- y = atomic_read(&cachefiles_mkdir_histogram[index]);
37720- z = atomic_read(&cachefiles_create_histogram[index]);
37721+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
37722+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
37723+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
37724 if (x == 0 && y == 0 && z == 0)
37725 return 0;
37726
37727diff -urNp linux-3.0.4/fs/cachefiles/rdwr.c linux-3.0.4/fs/cachefiles/rdwr.c
37728--- linux-3.0.4/fs/cachefiles/rdwr.c 2011-07-21 22:17:23.000000000 -0400
37729+++ linux-3.0.4/fs/cachefiles/rdwr.c 2011-08-23 21:47:56.000000000 -0400
37730@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
37731 old_fs = get_fs();
37732 set_fs(KERNEL_DS);
37733 ret = file->f_op->write(
37734- file, (const void __user *) data, len, &pos);
37735+ file, (__force const void __user *) data, len, &pos);
37736 set_fs(old_fs);
37737 kunmap(page);
37738 if (ret != len)
37739diff -urNp linux-3.0.4/fs/ceph/dir.c linux-3.0.4/fs/ceph/dir.c
37740--- linux-3.0.4/fs/ceph/dir.c 2011-07-21 22:17:23.000000000 -0400
37741+++ linux-3.0.4/fs/ceph/dir.c 2011-08-23 21:47:56.000000000 -0400
37742@@ -226,7 +226,7 @@ static int ceph_readdir(struct file *fil
37743 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
37744 struct ceph_mds_client *mdsc = fsc->mdsc;
37745 unsigned frag = fpos_frag(filp->f_pos);
37746- int off = fpos_off(filp->f_pos);
37747+ unsigned int off = fpos_off(filp->f_pos);
37748 int err;
37749 u32 ftype;
37750 struct ceph_mds_reply_info_parsed *rinfo;
37751diff -urNp linux-3.0.4/fs/cifs/cifs_debug.c linux-3.0.4/fs/cifs/cifs_debug.c
37752--- linux-3.0.4/fs/cifs/cifs_debug.c 2011-07-21 22:17:23.000000000 -0400
37753+++ linux-3.0.4/fs/cifs/cifs_debug.c 2011-08-25 17:18:05.000000000 -0400
37754@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(str
37755
37756 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
37757 #ifdef CONFIG_CIFS_STATS2
37758- atomic_set(&totBufAllocCount, 0);
37759- atomic_set(&totSmBufAllocCount, 0);
37760+ atomic_set_unchecked(&totBufAllocCount, 0);
37761+ atomic_set_unchecked(&totSmBufAllocCount, 0);
37762 #endif /* CONFIG_CIFS_STATS2 */
37763 spin_lock(&cifs_tcp_ses_lock);
37764 list_for_each(tmp1, &cifs_tcp_ses_list) {
37765@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(str
37766 tcon = list_entry(tmp3,
37767 struct cifs_tcon,
37768 tcon_list);
37769- atomic_set(&tcon->num_smbs_sent, 0);
37770- atomic_set(&tcon->num_writes, 0);
37771- atomic_set(&tcon->num_reads, 0);
37772- atomic_set(&tcon->num_oplock_brks, 0);
37773- atomic_set(&tcon->num_opens, 0);
37774- atomic_set(&tcon->num_posixopens, 0);
37775- atomic_set(&tcon->num_posixmkdirs, 0);
37776- atomic_set(&tcon->num_closes, 0);
37777- atomic_set(&tcon->num_deletes, 0);
37778- atomic_set(&tcon->num_mkdirs, 0);
37779- atomic_set(&tcon->num_rmdirs, 0);
37780- atomic_set(&tcon->num_renames, 0);
37781- atomic_set(&tcon->num_t2renames, 0);
37782- atomic_set(&tcon->num_ffirst, 0);
37783- atomic_set(&tcon->num_fnext, 0);
37784- atomic_set(&tcon->num_fclose, 0);
37785- atomic_set(&tcon->num_hardlinks, 0);
37786- atomic_set(&tcon->num_symlinks, 0);
37787- atomic_set(&tcon->num_locks, 0);
37788+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
37789+ atomic_set_unchecked(&tcon->num_writes, 0);
37790+ atomic_set_unchecked(&tcon->num_reads, 0);
37791+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
37792+ atomic_set_unchecked(&tcon->num_opens, 0);
37793+ atomic_set_unchecked(&tcon->num_posixopens, 0);
37794+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
37795+ atomic_set_unchecked(&tcon->num_closes, 0);
37796+ atomic_set_unchecked(&tcon->num_deletes, 0);
37797+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
37798+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
37799+ atomic_set_unchecked(&tcon->num_renames, 0);
37800+ atomic_set_unchecked(&tcon->num_t2renames, 0);
37801+ atomic_set_unchecked(&tcon->num_ffirst, 0);
37802+ atomic_set_unchecked(&tcon->num_fnext, 0);
37803+ atomic_set_unchecked(&tcon->num_fclose, 0);
37804+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
37805+ atomic_set_unchecked(&tcon->num_symlinks, 0);
37806+ atomic_set_unchecked(&tcon->num_locks, 0);
37807 }
37808 }
37809 }
37810@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct s
37811 smBufAllocCount.counter, cifs_min_small);
37812 #ifdef CONFIG_CIFS_STATS2
37813 seq_printf(m, "Total Large %d Small %d Allocations\n",
37814- atomic_read(&totBufAllocCount),
37815- atomic_read(&totSmBufAllocCount));
37816+ atomic_read_unchecked(&totBufAllocCount),
37817+ atomic_read_unchecked(&totSmBufAllocCount));
37818 #endif /* CONFIG_CIFS_STATS2 */
37819
37820 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
37821@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct s
37822 if (tcon->need_reconnect)
37823 seq_puts(m, "\tDISCONNECTED ");
37824 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
37825- atomic_read(&tcon->num_smbs_sent),
37826- atomic_read(&tcon->num_oplock_brks));
37827+ atomic_read_unchecked(&tcon->num_smbs_sent),
37828+ atomic_read_unchecked(&tcon->num_oplock_brks));
37829 seq_printf(m, "\nReads: %d Bytes: %lld",
37830- atomic_read(&tcon->num_reads),
37831+ atomic_read_unchecked(&tcon->num_reads),
37832 (long long)(tcon->bytes_read));
37833 seq_printf(m, "\nWrites: %d Bytes: %lld",
37834- atomic_read(&tcon->num_writes),
37835+ atomic_read_unchecked(&tcon->num_writes),
37836 (long long)(tcon->bytes_written));
37837 seq_printf(m, "\nFlushes: %d",
37838- atomic_read(&tcon->num_flushes));
37839+ atomic_read_unchecked(&tcon->num_flushes));
37840 seq_printf(m, "\nLocks: %d HardLinks: %d "
37841 "Symlinks: %d",
37842- atomic_read(&tcon->num_locks),
37843- atomic_read(&tcon->num_hardlinks),
37844- atomic_read(&tcon->num_symlinks));
37845+ atomic_read_unchecked(&tcon->num_locks),
37846+ atomic_read_unchecked(&tcon->num_hardlinks),
37847+ atomic_read_unchecked(&tcon->num_symlinks));
37848 seq_printf(m, "\nOpens: %d Closes: %d "
37849 "Deletes: %d",
37850- atomic_read(&tcon->num_opens),
37851- atomic_read(&tcon->num_closes),
37852- atomic_read(&tcon->num_deletes));
37853+ atomic_read_unchecked(&tcon->num_opens),
37854+ atomic_read_unchecked(&tcon->num_closes),
37855+ atomic_read_unchecked(&tcon->num_deletes));
37856 seq_printf(m, "\nPosix Opens: %d "
37857 "Posix Mkdirs: %d",
37858- atomic_read(&tcon->num_posixopens),
37859- atomic_read(&tcon->num_posixmkdirs));
37860+ atomic_read_unchecked(&tcon->num_posixopens),
37861+ atomic_read_unchecked(&tcon->num_posixmkdirs));
37862 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
37863- atomic_read(&tcon->num_mkdirs),
37864- atomic_read(&tcon->num_rmdirs));
37865+ atomic_read_unchecked(&tcon->num_mkdirs),
37866+ atomic_read_unchecked(&tcon->num_rmdirs));
37867 seq_printf(m, "\nRenames: %d T2 Renames %d",
37868- atomic_read(&tcon->num_renames),
37869- atomic_read(&tcon->num_t2renames));
37870+ atomic_read_unchecked(&tcon->num_renames),
37871+ atomic_read_unchecked(&tcon->num_t2renames));
37872 seq_printf(m, "\nFindFirst: %d FNext %d "
37873 "FClose %d",
37874- atomic_read(&tcon->num_ffirst),
37875- atomic_read(&tcon->num_fnext),
37876- atomic_read(&tcon->num_fclose));
37877+ atomic_read_unchecked(&tcon->num_ffirst),
37878+ atomic_read_unchecked(&tcon->num_fnext),
37879+ atomic_read_unchecked(&tcon->num_fclose));
37880 }
37881 }
37882 }
37883diff -urNp linux-3.0.4/fs/cifs/cifsfs.c linux-3.0.4/fs/cifs/cifsfs.c
37884--- linux-3.0.4/fs/cifs/cifsfs.c 2011-09-02 18:11:21.000000000 -0400
37885+++ linux-3.0.4/fs/cifs/cifsfs.c 2011-08-25 17:18:05.000000000 -0400
37886@@ -994,7 +994,7 @@ cifs_init_request_bufs(void)
37887 cifs_req_cachep = kmem_cache_create("cifs_request",
37888 CIFSMaxBufSize +
37889 MAX_CIFS_HDR_SIZE, 0,
37890- SLAB_HWCACHE_ALIGN, NULL);
37891+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
37892 if (cifs_req_cachep == NULL)
37893 return -ENOMEM;
37894
37895@@ -1021,7 +1021,7 @@ cifs_init_request_bufs(void)
37896 efficient to alloc 1 per page off the slab compared to 17K (5page)
37897 alloc of large cifs buffers even when page debugging is on */
37898 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
37899- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
37900+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
37901 NULL);
37902 if (cifs_sm_req_cachep == NULL) {
37903 mempool_destroy(cifs_req_poolp);
37904@@ -1106,8 +1106,8 @@ init_cifs(void)
37905 atomic_set(&bufAllocCount, 0);
37906 atomic_set(&smBufAllocCount, 0);
37907 #ifdef CONFIG_CIFS_STATS2
37908- atomic_set(&totBufAllocCount, 0);
37909- atomic_set(&totSmBufAllocCount, 0);
37910+ atomic_set_unchecked(&totBufAllocCount, 0);
37911+ atomic_set_unchecked(&totSmBufAllocCount, 0);
37912 #endif /* CONFIG_CIFS_STATS2 */
37913
37914 atomic_set(&midCount, 0);
37915diff -urNp linux-3.0.4/fs/cifs/cifsglob.h linux-3.0.4/fs/cifs/cifsglob.h
37916--- linux-3.0.4/fs/cifs/cifsglob.h 2011-07-21 22:17:23.000000000 -0400
37917+++ linux-3.0.4/fs/cifs/cifsglob.h 2011-08-25 17:18:05.000000000 -0400
37918@@ -381,28 +381,28 @@ struct cifs_tcon {
37919 __u16 Flags; /* optional support bits */
37920 enum statusEnum tidStatus;
37921 #ifdef CONFIG_CIFS_STATS
37922- atomic_t num_smbs_sent;
37923- atomic_t num_writes;
37924- atomic_t num_reads;
37925- atomic_t num_flushes;
37926- atomic_t num_oplock_brks;
37927- atomic_t num_opens;
37928- atomic_t num_closes;
37929- atomic_t num_deletes;
37930- atomic_t num_mkdirs;
37931- atomic_t num_posixopens;
37932- atomic_t num_posixmkdirs;
37933- atomic_t num_rmdirs;
37934- atomic_t num_renames;
37935- atomic_t num_t2renames;
37936- atomic_t num_ffirst;
37937- atomic_t num_fnext;
37938- atomic_t num_fclose;
37939- atomic_t num_hardlinks;
37940- atomic_t num_symlinks;
37941- atomic_t num_locks;
37942- atomic_t num_acl_get;
37943- atomic_t num_acl_set;
37944+ atomic_unchecked_t num_smbs_sent;
37945+ atomic_unchecked_t num_writes;
37946+ atomic_unchecked_t num_reads;
37947+ atomic_unchecked_t num_flushes;
37948+ atomic_unchecked_t num_oplock_brks;
37949+ atomic_unchecked_t num_opens;
37950+ atomic_unchecked_t num_closes;
37951+ atomic_unchecked_t num_deletes;
37952+ atomic_unchecked_t num_mkdirs;
37953+ atomic_unchecked_t num_posixopens;
37954+ atomic_unchecked_t num_posixmkdirs;
37955+ atomic_unchecked_t num_rmdirs;
37956+ atomic_unchecked_t num_renames;
37957+ atomic_unchecked_t num_t2renames;
37958+ atomic_unchecked_t num_ffirst;
37959+ atomic_unchecked_t num_fnext;
37960+ atomic_unchecked_t num_fclose;
37961+ atomic_unchecked_t num_hardlinks;
37962+ atomic_unchecked_t num_symlinks;
37963+ atomic_unchecked_t num_locks;
37964+ atomic_unchecked_t num_acl_get;
37965+ atomic_unchecked_t num_acl_set;
37966 #ifdef CONFIG_CIFS_STATS2
37967 unsigned long long time_writes;
37968 unsigned long long time_reads;
37969@@ -613,7 +613,7 @@ convert_delimiter(char *path, char delim
37970 }
37971
37972 #ifdef CONFIG_CIFS_STATS
37973-#define cifs_stats_inc atomic_inc
37974+#define cifs_stats_inc atomic_inc_unchecked
37975
37976 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
37977 unsigned int bytes)
37978@@ -911,8 +911,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnect
37979 /* Various Debug counters */
37980 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
37981 #ifdef CONFIG_CIFS_STATS2
37982-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
37983-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
37984+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
37985+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
37986 #endif
37987 GLOBAL_EXTERN atomic_t smBufAllocCount;
37988 GLOBAL_EXTERN atomic_t midCount;
37989diff -urNp linux-3.0.4/fs/cifs/link.c linux-3.0.4/fs/cifs/link.c
37990--- linux-3.0.4/fs/cifs/link.c 2011-07-21 22:17:23.000000000 -0400
37991+++ linux-3.0.4/fs/cifs/link.c 2011-08-23 21:47:56.000000000 -0400
37992@@ -587,7 +587,7 @@ symlink_exit:
37993
37994 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
37995 {
37996- char *p = nd_get_link(nd);
37997+ const char *p = nd_get_link(nd);
37998 if (!IS_ERR(p))
37999 kfree(p);
38000 }
38001diff -urNp linux-3.0.4/fs/cifs/misc.c linux-3.0.4/fs/cifs/misc.c
38002--- linux-3.0.4/fs/cifs/misc.c 2011-07-21 22:17:23.000000000 -0400
38003+++ linux-3.0.4/fs/cifs/misc.c 2011-08-25 17:18:05.000000000 -0400
38004@@ -156,7 +156,7 @@ cifs_buf_get(void)
38005 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
38006 atomic_inc(&bufAllocCount);
38007 #ifdef CONFIG_CIFS_STATS2
38008- atomic_inc(&totBufAllocCount);
38009+ atomic_inc_unchecked(&totBufAllocCount);
38010 #endif /* CONFIG_CIFS_STATS2 */
38011 }
38012
38013@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
38014 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
38015 atomic_inc(&smBufAllocCount);
38016 #ifdef CONFIG_CIFS_STATS2
38017- atomic_inc(&totSmBufAllocCount);
38018+ atomic_inc_unchecked(&totSmBufAllocCount);
38019 #endif /* CONFIG_CIFS_STATS2 */
38020
38021 }
38022diff -urNp linux-3.0.4/fs/coda/cache.c linux-3.0.4/fs/coda/cache.c
38023--- linux-3.0.4/fs/coda/cache.c 2011-07-21 22:17:23.000000000 -0400
38024+++ linux-3.0.4/fs/coda/cache.c 2011-08-23 21:47:56.000000000 -0400
38025@@ -24,7 +24,7 @@
38026 #include "coda_linux.h"
38027 #include "coda_cache.h"
38028
38029-static atomic_t permission_epoch = ATOMIC_INIT(0);
38030+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
38031
38032 /* replace or extend an acl cache hit */
38033 void coda_cache_enter(struct inode *inode, int mask)
38034@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inod
38035 struct coda_inode_info *cii = ITOC(inode);
38036
38037 spin_lock(&cii->c_lock);
38038- cii->c_cached_epoch = atomic_read(&permission_epoch);
38039+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
38040 if (cii->c_uid != current_fsuid()) {
38041 cii->c_uid = current_fsuid();
38042 cii->c_cached_perm = mask;
38043@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode
38044 {
38045 struct coda_inode_info *cii = ITOC(inode);
38046 spin_lock(&cii->c_lock);
38047- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
38048+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
38049 spin_unlock(&cii->c_lock);
38050 }
38051
38052 /* remove all acl caches */
38053 void coda_cache_clear_all(struct super_block *sb)
38054 {
38055- atomic_inc(&permission_epoch);
38056+ atomic_inc_unchecked(&permission_epoch);
38057 }
38058
38059
38060@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode
38061 spin_lock(&cii->c_lock);
38062 hit = (mask & cii->c_cached_perm) == mask &&
38063 cii->c_uid == current_fsuid() &&
38064- cii->c_cached_epoch == atomic_read(&permission_epoch);
38065+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
38066 spin_unlock(&cii->c_lock);
38067
38068 return hit;
38069diff -urNp linux-3.0.4/fs/compat_binfmt_elf.c linux-3.0.4/fs/compat_binfmt_elf.c
38070--- linux-3.0.4/fs/compat_binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
38071+++ linux-3.0.4/fs/compat_binfmt_elf.c 2011-08-23 21:47:56.000000000 -0400
38072@@ -30,11 +30,13 @@
38073 #undef elf_phdr
38074 #undef elf_shdr
38075 #undef elf_note
38076+#undef elf_dyn
38077 #undef elf_addr_t
38078 #define elfhdr elf32_hdr
38079 #define elf_phdr elf32_phdr
38080 #define elf_shdr elf32_shdr
38081 #define elf_note elf32_note
38082+#define elf_dyn Elf32_Dyn
38083 #define elf_addr_t Elf32_Addr
38084
38085 /*
38086diff -urNp linux-3.0.4/fs/compat.c linux-3.0.4/fs/compat.c
38087--- linux-3.0.4/fs/compat.c 2011-07-21 22:17:23.000000000 -0400
38088+++ linux-3.0.4/fs/compat.c 2011-08-23 22:49:33.000000000 -0400
38089@@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int
38090 goto out;
38091
38092 ret = -EINVAL;
38093- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
38094+ if (nr_segs > UIO_MAXIOV)
38095 goto out;
38096 if (nr_segs > fast_segs) {
38097 ret = -ENOMEM;
38098@@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
38099
38100 struct compat_readdir_callback {
38101 struct compat_old_linux_dirent __user *dirent;
38102+ struct file * file;
38103 int result;
38104 };
38105
38106@@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf
38107 buf->result = -EOVERFLOW;
38108 return -EOVERFLOW;
38109 }
38110+
38111+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
38112+ return 0;
38113+
38114 buf->result++;
38115 dirent = buf->dirent;
38116 if (!access_ok(VERIFY_WRITE, dirent,
38117@@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(u
38118
38119 buf.result = 0;
38120 buf.dirent = dirent;
38121+ buf.file = file;
38122
38123 error = vfs_readdir(file, compat_fillonedir, &buf);
38124 if (buf.result)
38125@@ -917,6 +923,7 @@ struct compat_linux_dirent {
38126 struct compat_getdents_callback {
38127 struct compat_linux_dirent __user *current_dir;
38128 struct compat_linux_dirent __user *previous;
38129+ struct file * file;
38130 int count;
38131 int error;
38132 };
38133@@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, c
38134 buf->error = -EOVERFLOW;
38135 return -EOVERFLOW;
38136 }
38137+
38138+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
38139+ return 0;
38140+
38141 dirent = buf->previous;
38142 if (dirent) {
38143 if (__put_user(offset, &dirent->d_off))
38144@@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsi
38145 buf.previous = NULL;
38146 buf.count = count;
38147 buf.error = 0;
38148+ buf.file = file;
38149
38150 error = vfs_readdir(file, compat_filldir, &buf);
38151 if (error >= 0)
38152@@ -1006,6 +1018,7 @@ out:
38153 struct compat_getdents_callback64 {
38154 struct linux_dirent64 __user *current_dir;
38155 struct linux_dirent64 __user *previous;
38156+ struct file * file;
38157 int count;
38158 int error;
38159 };
38160@@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf
38161 buf->error = -EINVAL; /* only used if we fail.. */
38162 if (reclen > buf->count)
38163 return -EINVAL;
38164+
38165+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
38166+ return 0;
38167+
38168 dirent = buf->previous;
38169
38170 if (dirent) {
38171@@ -1073,6 +1090,7 @@ asmlinkage long compat_sys_getdents64(un
38172 buf.previous = NULL;
38173 buf.count = count;
38174 buf.error = 0;
38175+ buf.file = file;
38176
38177 error = vfs_readdir(file, compat_filldir64, &buf);
38178 if (error >= 0)
38179@@ -1446,6 +1464,8 @@ int compat_core_sys_select(int n, compat
38180 struct fdtable *fdt;
38181 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
38182
38183+ pax_track_stack();
38184+
38185 if (n < 0)
38186 goto out_nofds;
38187
38188diff -urNp linux-3.0.4/fs/compat_ioctl.c linux-3.0.4/fs/compat_ioctl.c
38189--- linux-3.0.4/fs/compat_ioctl.c 2011-07-21 22:17:23.000000000 -0400
38190+++ linux-3.0.4/fs/compat_ioctl.c 2011-08-23 21:47:56.000000000 -0400
38191@@ -208,6 +208,8 @@ static int do_video_set_spu_palette(unsi
38192
38193 err = get_user(palp, &up->palette);
38194 err |= get_user(length, &up->length);
38195+ if (err)
38196+ return -EFAULT;
38197
38198 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
38199 err = put_user(compat_ptr(palp), &up_native->palette);
38200@@ -1638,8 +1640,8 @@ asmlinkage long compat_sys_ioctl(unsigne
38201 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
38202 {
38203 unsigned int a, b;
38204- a = *(unsigned int *)p;
38205- b = *(unsigned int *)q;
38206+ a = *(const unsigned int *)p;
38207+ b = *(const unsigned int *)q;
38208 if (a > b)
38209 return 1;
38210 if (a < b)
38211diff -urNp linux-3.0.4/fs/configfs/dir.c linux-3.0.4/fs/configfs/dir.c
38212--- linux-3.0.4/fs/configfs/dir.c 2011-07-21 22:17:23.000000000 -0400
38213+++ linux-3.0.4/fs/configfs/dir.c 2011-08-23 21:47:56.000000000 -0400
38214@@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file
38215 }
38216 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
38217 struct configfs_dirent *next;
38218- const char * name;
38219+ const unsigned char * name;
38220+ char d_name[sizeof(next->s_dentry->d_iname)];
38221 int len;
38222 struct inode *inode = NULL;
38223
38224@@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file
38225 continue;
38226
38227 name = configfs_get_name(next);
38228- len = strlen(name);
38229+ if (next->s_dentry && name == next->s_dentry->d_iname) {
38230+ len = next->s_dentry->d_name.len;
38231+ memcpy(d_name, name, len);
38232+ name = d_name;
38233+ } else
38234+ len = strlen(name);
38235
38236 /*
38237 * We'll have a dentry and an inode for
38238diff -urNp linux-3.0.4/fs/dcache.c linux-3.0.4/fs/dcache.c
38239--- linux-3.0.4/fs/dcache.c 2011-07-21 22:17:23.000000000 -0400
38240+++ linux-3.0.4/fs/dcache.c 2011-08-23 21:47:56.000000000 -0400
38241@@ -3089,7 +3089,7 @@ void __init vfs_caches_init(unsigned lon
38242 mempages -= reserve;
38243
38244 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
38245- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
38246+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
38247
38248 dcache_init();
38249 inode_init();
38250diff -urNp linux-3.0.4/fs/ecryptfs/inode.c linux-3.0.4/fs/ecryptfs/inode.c
38251--- linux-3.0.4/fs/ecryptfs/inode.c 2011-09-02 18:11:21.000000000 -0400
38252+++ linux-3.0.4/fs/ecryptfs/inode.c 2011-08-23 21:47:56.000000000 -0400
38253@@ -704,7 +704,7 @@ static int ecryptfs_readlink_lower(struc
38254 old_fs = get_fs();
38255 set_fs(get_ds());
38256 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
38257- (char __user *)lower_buf,
38258+ (__force char __user *)lower_buf,
38259 lower_bufsiz);
38260 set_fs(old_fs);
38261 if (rc < 0)
38262@@ -750,7 +750,7 @@ static void *ecryptfs_follow_link(struct
38263 }
38264 old_fs = get_fs();
38265 set_fs(get_ds());
38266- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
38267+ rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
38268 set_fs(old_fs);
38269 if (rc < 0) {
38270 kfree(buf);
38271@@ -765,7 +765,7 @@ out:
38272 static void
38273 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
38274 {
38275- char *buf = nd_get_link(nd);
38276+ const char *buf = nd_get_link(nd);
38277 if (!IS_ERR(buf)) {
38278 /* Free the char* */
38279 kfree(buf);
38280diff -urNp linux-3.0.4/fs/ecryptfs/miscdev.c linux-3.0.4/fs/ecryptfs/miscdev.c
38281--- linux-3.0.4/fs/ecryptfs/miscdev.c 2011-07-21 22:17:23.000000000 -0400
38282+++ linux-3.0.4/fs/ecryptfs/miscdev.c 2011-08-23 21:47:56.000000000 -0400
38283@@ -328,7 +328,7 @@ check_list:
38284 goto out_unlock_msg_ctx;
38285 i = 5;
38286 if (msg_ctx->msg) {
38287- if (copy_to_user(&buf[i], packet_length, packet_length_size))
38288+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
38289 goto out_unlock_msg_ctx;
38290 i += packet_length_size;
38291 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
38292diff -urNp linux-3.0.4/fs/exec.c linux-3.0.4/fs/exec.c
38293--- linux-3.0.4/fs/exec.c 2011-07-21 22:17:23.000000000 -0400
38294+++ linux-3.0.4/fs/exec.c 2011-08-25 17:26:58.000000000 -0400
38295@@ -55,12 +55,24 @@
38296 #include <linux/pipe_fs_i.h>
38297 #include <linux/oom.h>
38298 #include <linux/compat.h>
38299+#include <linux/random.h>
38300+#include <linux/seq_file.h>
38301+
38302+#ifdef CONFIG_PAX_REFCOUNT
38303+#include <linux/kallsyms.h>
38304+#include <linux/kdebug.h>
38305+#endif
38306
38307 #include <asm/uaccess.h>
38308 #include <asm/mmu_context.h>
38309 #include <asm/tlb.h>
38310 #include "internal.h"
38311
38312+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
38313+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
38314+EXPORT_SYMBOL(pax_set_initial_flags_func);
38315+#endif
38316+
38317 int core_uses_pid;
38318 char core_pattern[CORENAME_MAX_SIZE] = "core";
38319 unsigned int core_pipe_limit;
38320@@ -70,7 +82,7 @@ struct core_name {
38321 char *corename;
38322 int used, size;
38323 };
38324-static atomic_t call_count = ATOMIC_INIT(1);
38325+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
38326
38327 /* The maximal length of core_pattern is also specified in sysctl.c */
38328
38329@@ -116,7 +128,7 @@ SYSCALL_DEFINE1(uselib, const char __use
38330 char *tmp = getname(library);
38331 int error = PTR_ERR(tmp);
38332 static const struct open_flags uselib_flags = {
38333- .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
38334+ .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
38335 .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
38336 .intent = LOOKUP_OPEN
38337 };
38338@@ -195,18 +207,10 @@ static struct page *get_arg_page(struct
38339 int write)
38340 {
38341 struct page *page;
38342- int ret;
38343
38344-#ifdef CONFIG_STACK_GROWSUP
38345- if (write) {
38346- ret = expand_downwards(bprm->vma, pos);
38347- if (ret < 0)
38348- return NULL;
38349- }
38350-#endif
38351- ret = get_user_pages(current, bprm->mm, pos,
38352- 1, write, 1, &page, NULL);
38353- if (ret <= 0)
38354+ if (0 > expand_downwards(bprm->vma, pos))
38355+ return NULL;
38356+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
38357 return NULL;
38358
38359 if (write) {
38360@@ -281,6 +285,11 @@ static int __bprm_mm_init(struct linux_b
38361 vma->vm_end = STACK_TOP_MAX;
38362 vma->vm_start = vma->vm_end - PAGE_SIZE;
38363 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
38364+
38365+#ifdef CONFIG_PAX_SEGMEXEC
38366+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
38367+#endif
38368+
38369 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
38370 INIT_LIST_HEAD(&vma->anon_vma_chain);
38371
38372@@ -295,6 +304,12 @@ static int __bprm_mm_init(struct linux_b
38373 mm->stack_vm = mm->total_vm = 1;
38374 up_write(&mm->mmap_sem);
38375 bprm->p = vma->vm_end - sizeof(void *);
38376+
38377+#ifdef CONFIG_PAX_RANDUSTACK
38378+ if (randomize_va_space)
38379+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
38380+#endif
38381+
38382 return 0;
38383 err:
38384 up_write(&mm->mmap_sem);
38385@@ -403,19 +418,7 @@ err:
38386 return err;
38387 }
38388
38389-struct user_arg_ptr {
38390-#ifdef CONFIG_COMPAT
38391- bool is_compat;
38392-#endif
38393- union {
38394- const char __user *const __user *native;
38395-#ifdef CONFIG_COMPAT
38396- compat_uptr_t __user *compat;
38397-#endif
38398- } ptr;
38399-};
38400-
38401-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
38402+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
38403 {
38404 const char __user *native;
38405
38406@@ -566,7 +569,7 @@ int copy_strings_kernel(int argc, const
38407 int r;
38408 mm_segment_t oldfs = get_fs();
38409 struct user_arg_ptr argv = {
38410- .ptr.native = (const char __user *const __user *)__argv,
38411+ .ptr.native = (__force const char __user *const __user *)__argv,
38412 };
38413
38414 set_fs(KERNEL_DS);
38415@@ -601,7 +604,8 @@ static int shift_arg_pages(struct vm_are
38416 unsigned long new_end = old_end - shift;
38417 struct mmu_gather tlb;
38418
38419- BUG_ON(new_start > new_end);
38420+ if (new_start >= new_end || new_start < mmap_min_addr)
38421+ return -ENOMEM;
38422
38423 /*
38424 * ensure there are no vmas between where we want to go
38425@@ -610,6 +614,10 @@ static int shift_arg_pages(struct vm_are
38426 if (vma != find_vma(mm, new_start))
38427 return -EFAULT;
38428
38429+#ifdef CONFIG_PAX_SEGMEXEC
38430+ BUG_ON(pax_find_mirror_vma(vma));
38431+#endif
38432+
38433 /*
38434 * cover the whole range: [new_start, old_end)
38435 */
38436@@ -690,10 +698,6 @@ int setup_arg_pages(struct linux_binprm
38437 stack_top = arch_align_stack(stack_top);
38438 stack_top = PAGE_ALIGN(stack_top);
38439
38440- if (unlikely(stack_top < mmap_min_addr) ||
38441- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
38442- return -ENOMEM;
38443-
38444 stack_shift = vma->vm_end - stack_top;
38445
38446 bprm->p -= stack_shift;
38447@@ -705,8 +709,28 @@ int setup_arg_pages(struct linux_binprm
38448 bprm->exec -= stack_shift;
38449
38450 down_write(&mm->mmap_sem);
38451+
38452+ /* Move stack pages down in memory. */
38453+ if (stack_shift) {
38454+ ret = shift_arg_pages(vma, stack_shift);
38455+ if (ret)
38456+ goto out_unlock;
38457+ }
38458+
38459 vm_flags = VM_STACK_FLAGS;
38460
38461+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
38462+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
38463+ vm_flags &= ~VM_EXEC;
38464+
38465+#ifdef CONFIG_PAX_MPROTECT
38466+ if (mm->pax_flags & MF_PAX_MPROTECT)
38467+ vm_flags &= ~VM_MAYEXEC;
38468+#endif
38469+
38470+ }
38471+#endif
38472+
38473 /*
38474 * Adjust stack execute permissions; explicitly enable for
38475 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
38476@@ -725,13 +749,6 @@ int setup_arg_pages(struct linux_binprm
38477 goto out_unlock;
38478 BUG_ON(prev != vma);
38479
38480- /* Move stack pages down in memory. */
38481- if (stack_shift) {
38482- ret = shift_arg_pages(vma, stack_shift);
38483- if (ret)
38484- goto out_unlock;
38485- }
38486-
38487 /* mprotect_fixup is overkill to remove the temporary stack flags */
38488 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
38489
38490@@ -771,7 +788,7 @@ struct file *open_exec(const char *name)
38491 struct file *file;
38492 int err;
38493 static const struct open_flags open_exec_flags = {
38494- .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
38495+ .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
38496 .acc_mode = MAY_EXEC | MAY_OPEN,
38497 .intent = LOOKUP_OPEN
38498 };
38499@@ -812,7 +829,7 @@ int kernel_read(struct file *file, loff_
38500 old_fs = get_fs();
38501 set_fs(get_ds());
38502 /* The cast to a user pointer is valid due to the set_fs() */
38503- result = vfs_read(file, (void __user *)addr, count, &pos);
38504+ result = vfs_read(file, (__force void __user *)addr, count, &pos);
38505 set_fs(old_fs);
38506 return result;
38507 }
38508@@ -1236,7 +1253,7 @@ int check_unsafe_exec(struct linux_binpr
38509 }
38510 rcu_read_unlock();
38511
38512- if (p->fs->users > n_fs) {
38513+ if (atomic_read(&p->fs->users) > n_fs) {
38514 bprm->unsafe |= LSM_UNSAFE_SHARE;
38515 } else {
38516 res = -EAGAIN;
38517@@ -1428,11 +1445,35 @@ static int do_execve_common(const char *
38518 struct user_arg_ptr envp,
38519 struct pt_regs *regs)
38520 {
38521+#ifdef CONFIG_GRKERNSEC
38522+ struct file *old_exec_file;
38523+ struct acl_subject_label *old_acl;
38524+ struct rlimit old_rlim[RLIM_NLIMITS];
38525+#endif
38526 struct linux_binprm *bprm;
38527 struct file *file;
38528 struct files_struct *displaced;
38529 bool clear_in_exec;
38530 int retval;
38531+ const struct cred *cred = current_cred();
38532+
38533+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
38534+
38535+ /*
38536+ * We move the actual failure in case of RLIMIT_NPROC excess from
38537+ * set*uid() to execve() because too many poorly written programs
38538+ * don't check setuid() return code. Here we additionally recheck
38539+ * whether NPROC limit is still exceeded.
38540+ */
38541+ if ((current->flags & PF_NPROC_EXCEEDED) &&
38542+ atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) {
38543+ retval = -EAGAIN;
38544+ goto out_ret;
38545+ }
38546+
38547+ /* We're below the limit (still or again), so we don't want to make
38548+ * further execve() calls fail. */
38549+ current->flags &= ~PF_NPROC_EXCEEDED;
38550
38551 retval = unshare_files(&displaced);
38552 if (retval)
38553@@ -1464,6 +1505,16 @@ static int do_execve_common(const char *
38554 bprm->filename = filename;
38555 bprm->interp = filename;
38556
38557+ if (gr_process_user_ban()) {
38558+ retval = -EPERM;
38559+ goto out_file;
38560+ }
38561+
38562+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
38563+ retval = -EACCES;
38564+ goto out_file;
38565+ }
38566+
38567 retval = bprm_mm_init(bprm);
38568 if (retval)
38569 goto out_file;
38570@@ -1493,9 +1544,40 @@ static int do_execve_common(const char *
38571 if (retval < 0)
38572 goto out;
38573
38574+ if (!gr_tpe_allow(file)) {
38575+ retval = -EACCES;
38576+ goto out;
38577+ }
38578+
38579+ if (gr_check_crash_exec(file)) {
38580+ retval = -EACCES;
38581+ goto out;
38582+ }
38583+
38584+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
38585+
38586+ gr_handle_exec_args(bprm, argv);
38587+
38588+#ifdef CONFIG_GRKERNSEC
38589+ old_acl = current->acl;
38590+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
38591+ old_exec_file = current->exec_file;
38592+ get_file(file);
38593+ current->exec_file = file;
38594+#endif
38595+
38596+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
38597+ bprm->unsafe & LSM_UNSAFE_SHARE);
38598+ if (retval < 0)
38599+ goto out_fail;
38600+
38601 retval = search_binary_handler(bprm,regs);
38602 if (retval < 0)
38603- goto out;
38604+ goto out_fail;
38605+#ifdef CONFIG_GRKERNSEC
38606+ if (old_exec_file)
38607+ fput(old_exec_file);
38608+#endif
38609
38610 /* execve succeeded */
38611 current->fs->in_exec = 0;
38612@@ -1506,6 +1588,14 @@ static int do_execve_common(const char *
38613 put_files_struct(displaced);
38614 return retval;
38615
38616+out_fail:
38617+#ifdef CONFIG_GRKERNSEC
38618+ current->acl = old_acl;
38619+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
38620+ fput(current->exec_file);
38621+ current->exec_file = old_exec_file;
38622+#endif
38623+
38624 out:
38625 if (bprm->mm) {
38626 acct_arg_size(bprm, 0);
38627@@ -1579,7 +1669,7 @@ static int expand_corename(struct core_n
38628 {
38629 char *old_corename = cn->corename;
38630
38631- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
38632+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
38633 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
38634
38635 if (!cn->corename) {
38636@@ -1667,7 +1757,7 @@ static int format_corename(struct core_n
38637 int pid_in_pattern = 0;
38638 int err = 0;
38639
38640- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
38641+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
38642 cn->corename = kmalloc(cn->size, GFP_KERNEL);
38643 cn->used = 0;
38644
38645@@ -1758,6 +1848,219 @@ out:
38646 return ispipe;
38647 }
38648
38649+int pax_check_flags(unsigned long *flags)
38650+{
38651+ int retval = 0;
38652+
38653+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
38654+ if (*flags & MF_PAX_SEGMEXEC)
38655+ {
38656+ *flags &= ~MF_PAX_SEGMEXEC;
38657+ retval = -EINVAL;
38658+ }
38659+#endif
38660+
38661+ if ((*flags & MF_PAX_PAGEEXEC)
38662+
38663+#ifdef CONFIG_PAX_PAGEEXEC
38664+ && (*flags & MF_PAX_SEGMEXEC)
38665+#endif
38666+
38667+ )
38668+ {
38669+ *flags &= ~MF_PAX_PAGEEXEC;
38670+ retval = -EINVAL;
38671+ }
38672+
38673+ if ((*flags & MF_PAX_MPROTECT)
38674+
38675+#ifdef CONFIG_PAX_MPROTECT
38676+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
38677+#endif
38678+
38679+ )
38680+ {
38681+ *flags &= ~MF_PAX_MPROTECT;
38682+ retval = -EINVAL;
38683+ }
38684+
38685+ if ((*flags & MF_PAX_EMUTRAMP)
38686+
38687+#ifdef CONFIG_PAX_EMUTRAMP
38688+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
38689+#endif
38690+
38691+ )
38692+ {
38693+ *flags &= ~MF_PAX_EMUTRAMP;
38694+ retval = -EINVAL;
38695+ }
38696+
38697+ return retval;
38698+}
38699+
38700+EXPORT_SYMBOL(pax_check_flags);
38701+
38702+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
38703+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
38704+{
38705+ struct task_struct *tsk = current;
38706+ struct mm_struct *mm = current->mm;
38707+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
38708+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
38709+ char *path_exec = NULL;
38710+ char *path_fault = NULL;
38711+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
38712+
38713+ if (buffer_exec && buffer_fault) {
38714+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
38715+
38716+ down_read(&mm->mmap_sem);
38717+ vma = mm->mmap;
38718+ while (vma && (!vma_exec || !vma_fault)) {
38719+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
38720+ vma_exec = vma;
38721+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
38722+ vma_fault = vma;
38723+ vma = vma->vm_next;
38724+ }
38725+ if (vma_exec) {
38726+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
38727+ if (IS_ERR(path_exec))
38728+ path_exec = "<path too long>";
38729+ else {
38730+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
38731+ if (path_exec) {
38732+ *path_exec = 0;
38733+ path_exec = buffer_exec;
38734+ } else
38735+ path_exec = "<path too long>";
38736+ }
38737+ }
38738+ if (vma_fault) {
38739+ start = vma_fault->vm_start;
38740+ end = vma_fault->vm_end;
38741+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
38742+ if (vma_fault->vm_file) {
38743+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
38744+ if (IS_ERR(path_fault))
38745+ path_fault = "<path too long>";
38746+ else {
38747+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
38748+ if (path_fault) {
38749+ *path_fault = 0;
38750+ path_fault = buffer_fault;
38751+ } else
38752+ path_fault = "<path too long>";
38753+ }
38754+ } else
38755+ path_fault = "<anonymous mapping>";
38756+ }
38757+ up_read(&mm->mmap_sem);
38758+ }
38759+ if (tsk->signal->curr_ip)
38760+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
38761+ else
38762+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
38763+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
38764+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
38765+ task_uid(tsk), task_euid(tsk), pc, sp);
38766+ free_page((unsigned long)buffer_exec);
38767+ free_page((unsigned long)buffer_fault);
38768+ pax_report_insns(pc, sp);
38769+ do_coredump(SIGKILL, SIGKILL, regs);
38770+}
38771+#endif
38772+
38773+#ifdef CONFIG_PAX_REFCOUNT
38774+void pax_report_refcount_overflow(struct pt_regs *regs)
38775+{
38776+ if (current->signal->curr_ip)
38777+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
38778+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
38779+ else
38780+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
38781+ current->comm, task_pid_nr(current), current_uid(), current_euid());
38782+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
38783+ show_regs(regs);
38784+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
38785+}
38786+#endif
38787+
38788+#ifdef CONFIG_PAX_USERCOPY
38789+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
38790+int object_is_on_stack(const void *obj, unsigned long len)
38791+{
38792+ const void * const stack = task_stack_page(current);
38793+ const void * const stackend = stack + THREAD_SIZE;
38794+
38795+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
38796+ const void *frame = NULL;
38797+ const void *oldframe;
38798+#endif
38799+
38800+ if (obj + len < obj)
38801+ return -1;
38802+
38803+ if (obj + len <= stack || stackend <= obj)
38804+ return 0;
38805+
38806+ if (obj < stack || stackend < obj + len)
38807+ return -1;
38808+
38809+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
38810+ oldframe = __builtin_frame_address(1);
38811+ if (oldframe)
38812+ frame = __builtin_frame_address(2);
38813+ /*
38814+ low ----------------------------------------------> high
38815+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
38816+ ^----------------^
38817+ allow copies only within here
38818+ */
38819+ while (stack <= frame && frame < stackend) {
38820+ /* if obj + len extends past the last frame, this
38821+ check won't pass and the next frame will be 0,
38822+ causing us to bail out and correctly report
38823+ the copy as invalid
38824+ */
38825+ if (obj + len <= frame)
38826+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
38827+ oldframe = frame;
38828+ frame = *(const void * const *)frame;
38829+ }
38830+ return -1;
38831+#else
38832+ return 1;
38833+#endif
38834+}
38835+
38836+
38837+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
38838+{
38839+ if (current->signal->curr_ip)
38840+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
38841+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
38842+ else
38843+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
38844+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
38845+ dump_stack();
38846+ gr_handle_kernel_exploit();
38847+ do_group_exit(SIGKILL);
38848+}
38849+#endif
38850+
38851+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
38852+void pax_track_stack(void)
38853+{
38854+ unsigned long sp = (unsigned long)&sp;
38855+ if (sp < current_thread_info()->lowest_stack &&
38856+ sp > (unsigned long)task_stack_page(current))
38857+ current_thread_info()->lowest_stack = sp;
38858+}
38859+EXPORT_SYMBOL(pax_track_stack);
38860+#endif
38861+
38862 static int zap_process(struct task_struct *start, int exit_code)
38863 {
38864 struct task_struct *t;
38865@@ -1969,17 +2272,17 @@ static void wait_for_dump_helpers(struct
38866 pipe = file->f_path.dentry->d_inode->i_pipe;
38867
38868 pipe_lock(pipe);
38869- pipe->readers++;
38870- pipe->writers--;
38871+ atomic_inc(&pipe->readers);
38872+ atomic_dec(&pipe->writers);
38873
38874- while ((pipe->readers > 1) && (!signal_pending(current))) {
38875+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
38876 wake_up_interruptible_sync(&pipe->wait);
38877 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
38878 pipe_wait(pipe);
38879 }
38880
38881- pipe->readers--;
38882- pipe->writers++;
38883+ atomic_dec(&pipe->readers);
38884+ atomic_inc(&pipe->writers);
38885 pipe_unlock(pipe);
38886
38887 }
38888@@ -2040,7 +2343,7 @@ void do_coredump(long signr, int exit_co
38889 int retval = 0;
38890 int flag = 0;
38891 int ispipe;
38892- static atomic_t core_dump_count = ATOMIC_INIT(0);
38893+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
38894 struct coredump_params cprm = {
38895 .signr = signr,
38896 .regs = regs,
38897@@ -2055,6 +2358,9 @@ void do_coredump(long signr, int exit_co
38898
38899 audit_core_dumps(signr);
38900
38901+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
38902+ gr_handle_brute_attach(current, cprm.mm_flags);
38903+
38904 binfmt = mm->binfmt;
38905 if (!binfmt || !binfmt->core_dump)
38906 goto fail;
38907@@ -2095,6 +2401,8 @@ void do_coredump(long signr, int exit_co
38908 goto fail_corename;
38909 }
38910
38911+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
38912+
38913 if (ispipe) {
38914 int dump_count;
38915 char **helper_argv;
38916@@ -2122,7 +2430,7 @@ void do_coredump(long signr, int exit_co
38917 }
38918 cprm.limit = RLIM_INFINITY;
38919
38920- dump_count = atomic_inc_return(&core_dump_count);
38921+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
38922 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
38923 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
38924 task_tgid_vnr(current), current->comm);
38925@@ -2192,7 +2500,7 @@ close_fail:
38926 filp_close(cprm.file, NULL);
38927 fail_dropcount:
38928 if (ispipe)
38929- atomic_dec(&core_dump_count);
38930+ atomic_dec_unchecked(&core_dump_count);
38931 fail_unlock:
38932 kfree(cn.corename);
38933 fail_corename:
38934diff -urNp linux-3.0.4/fs/ext2/balloc.c linux-3.0.4/fs/ext2/balloc.c
38935--- linux-3.0.4/fs/ext2/balloc.c 2011-07-21 22:17:23.000000000 -0400
38936+++ linux-3.0.4/fs/ext2/balloc.c 2011-08-23 21:48:14.000000000 -0400
38937@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
38938
38939 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
38940 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
38941- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
38942+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
38943 sbi->s_resuid != current_fsuid() &&
38944 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
38945 return 0;
38946diff -urNp linux-3.0.4/fs/ext3/balloc.c linux-3.0.4/fs/ext3/balloc.c
38947--- linux-3.0.4/fs/ext3/balloc.c 2011-07-21 22:17:23.000000000 -0400
38948+++ linux-3.0.4/fs/ext3/balloc.c 2011-08-23 21:48:14.000000000 -0400
38949@@ -1441,7 +1441,7 @@ static int ext3_has_free_blocks(struct e
38950
38951 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
38952 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
38953- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
38954+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
38955 sbi->s_resuid != current_fsuid() &&
38956 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
38957 return 0;
38958diff -urNp linux-3.0.4/fs/ext4/balloc.c linux-3.0.4/fs/ext4/balloc.c
38959--- linux-3.0.4/fs/ext4/balloc.c 2011-07-21 22:17:23.000000000 -0400
38960+++ linux-3.0.4/fs/ext4/balloc.c 2011-08-23 21:48:14.000000000 -0400
38961@@ -394,8 +394,8 @@ static int ext4_has_free_blocks(struct e
38962 /* Hm, nope. Are (enough) root reserved blocks available? */
38963 if (sbi->s_resuid == current_fsuid() ||
38964 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
38965- capable(CAP_SYS_RESOURCE) ||
38966- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
38967+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
38968+ capable_nolog(CAP_SYS_RESOURCE)) {
38969
38970 if (free_blocks >= (nblocks + dirty_blocks))
38971 return 1;
38972diff -urNp linux-3.0.4/fs/ext4/ext4.h linux-3.0.4/fs/ext4/ext4.h
38973--- linux-3.0.4/fs/ext4/ext4.h 2011-09-02 18:11:21.000000000 -0400
38974+++ linux-3.0.4/fs/ext4/ext4.h 2011-08-23 21:47:56.000000000 -0400
38975@@ -1177,19 +1177,19 @@ struct ext4_sb_info {
38976 unsigned long s_mb_last_start;
38977
38978 /* stats for buddy allocator */
38979- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
38980- atomic_t s_bal_success; /* we found long enough chunks */
38981- atomic_t s_bal_allocated; /* in blocks */
38982- atomic_t s_bal_ex_scanned; /* total extents scanned */
38983- atomic_t s_bal_goals; /* goal hits */
38984- atomic_t s_bal_breaks; /* too long searches */
38985- atomic_t s_bal_2orders; /* 2^order hits */
38986+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
38987+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
38988+ atomic_unchecked_t s_bal_allocated; /* in blocks */
38989+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
38990+ atomic_unchecked_t s_bal_goals; /* goal hits */
38991+ atomic_unchecked_t s_bal_breaks; /* too long searches */
38992+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
38993 spinlock_t s_bal_lock;
38994 unsigned long s_mb_buddies_generated;
38995 unsigned long long s_mb_generation_time;
38996- atomic_t s_mb_lost_chunks;
38997- atomic_t s_mb_preallocated;
38998- atomic_t s_mb_discarded;
38999+ atomic_unchecked_t s_mb_lost_chunks;
39000+ atomic_unchecked_t s_mb_preallocated;
39001+ atomic_unchecked_t s_mb_discarded;
39002 atomic_t s_lock_busy;
39003
39004 /* locality groups */
39005diff -urNp linux-3.0.4/fs/ext4/mballoc.c linux-3.0.4/fs/ext4/mballoc.c
39006--- linux-3.0.4/fs/ext4/mballoc.c 2011-09-02 18:11:21.000000000 -0400
39007+++ linux-3.0.4/fs/ext4/mballoc.c 2011-08-23 21:48:14.000000000 -0400
39008@@ -1793,7 +1793,7 @@ void ext4_mb_simple_scan_group(struct ex
39009 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
39010
39011 if (EXT4_SB(sb)->s_mb_stats)
39012- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
39013+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
39014
39015 break;
39016 }
39017@@ -2087,7 +2087,7 @@ repeat:
39018 ac->ac_status = AC_STATUS_CONTINUE;
39019 ac->ac_flags |= EXT4_MB_HINT_FIRST;
39020 cr = 3;
39021- atomic_inc(&sbi->s_mb_lost_chunks);
39022+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
39023 goto repeat;
39024 }
39025 }
39026@@ -2130,6 +2130,8 @@ static int ext4_mb_seq_groups_show(struc
39027 ext4_grpblk_t counters[16];
39028 } sg;
39029
39030+ pax_track_stack();
39031+
39032 group--;
39033 if (group == 0)
39034 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
39035@@ -2553,25 +2555,25 @@ int ext4_mb_release(struct super_block *
39036 if (sbi->s_mb_stats) {
39037 printk(KERN_INFO
39038 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
39039- atomic_read(&sbi->s_bal_allocated),
39040- atomic_read(&sbi->s_bal_reqs),
39041- atomic_read(&sbi->s_bal_success));
39042+ atomic_read_unchecked(&sbi->s_bal_allocated),
39043+ atomic_read_unchecked(&sbi->s_bal_reqs),
39044+ atomic_read_unchecked(&sbi->s_bal_success));
39045 printk(KERN_INFO
39046 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
39047 "%u 2^N hits, %u breaks, %u lost\n",
39048- atomic_read(&sbi->s_bal_ex_scanned),
39049- atomic_read(&sbi->s_bal_goals),
39050- atomic_read(&sbi->s_bal_2orders),
39051- atomic_read(&sbi->s_bal_breaks),
39052- atomic_read(&sbi->s_mb_lost_chunks));
39053+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
39054+ atomic_read_unchecked(&sbi->s_bal_goals),
39055+ atomic_read_unchecked(&sbi->s_bal_2orders),
39056+ atomic_read_unchecked(&sbi->s_bal_breaks),
39057+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
39058 printk(KERN_INFO
39059 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
39060 sbi->s_mb_buddies_generated++,
39061 sbi->s_mb_generation_time);
39062 printk(KERN_INFO
39063 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
39064- atomic_read(&sbi->s_mb_preallocated),
39065- atomic_read(&sbi->s_mb_discarded));
39066+ atomic_read_unchecked(&sbi->s_mb_preallocated),
39067+ atomic_read_unchecked(&sbi->s_mb_discarded));
39068 }
39069
39070 free_percpu(sbi->s_locality_groups);
39071@@ -3041,16 +3043,16 @@ static void ext4_mb_collect_stats(struct
39072 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
39073
39074 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
39075- atomic_inc(&sbi->s_bal_reqs);
39076- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
39077+ atomic_inc_unchecked(&sbi->s_bal_reqs);
39078+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
39079 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
39080- atomic_inc(&sbi->s_bal_success);
39081- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
39082+ atomic_inc_unchecked(&sbi->s_bal_success);
39083+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
39084 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
39085 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
39086- atomic_inc(&sbi->s_bal_goals);
39087+ atomic_inc_unchecked(&sbi->s_bal_goals);
39088 if (ac->ac_found > sbi->s_mb_max_to_scan)
39089- atomic_inc(&sbi->s_bal_breaks);
39090+ atomic_inc_unchecked(&sbi->s_bal_breaks);
39091 }
39092
39093 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
39094@@ -3448,7 +3450,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
39095 trace_ext4_mb_new_inode_pa(ac, pa);
39096
39097 ext4_mb_use_inode_pa(ac, pa);
39098- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39099+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39100
39101 ei = EXT4_I(ac->ac_inode);
39102 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
39103@@ -3508,7 +3510,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
39104 trace_ext4_mb_new_group_pa(ac, pa);
39105
39106 ext4_mb_use_group_pa(ac, pa);
39107- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39108+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39109
39110 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
39111 lg = ac->ac_lg;
39112@@ -3595,7 +3597,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
39113 * from the bitmap and continue.
39114 */
39115 }
39116- atomic_add(free, &sbi->s_mb_discarded);
39117+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
39118
39119 return err;
39120 }
39121@@ -3613,7 +3615,7 @@ ext4_mb_release_group_pa(struct ext4_bud
39122 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
39123 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
39124 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
39125- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
39126+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
39127 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
39128
39129 return 0;
39130diff -urNp linux-3.0.4/fs/fcntl.c linux-3.0.4/fs/fcntl.c
39131--- linux-3.0.4/fs/fcntl.c 2011-07-21 22:17:23.000000000 -0400
39132+++ linux-3.0.4/fs/fcntl.c 2011-08-23 21:48:14.000000000 -0400
39133@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct
39134 if (err)
39135 return err;
39136
39137+ if (gr_handle_chroot_fowner(pid, type))
39138+ return -ENOENT;
39139+ if (gr_check_protected_task_fowner(pid, type))
39140+ return -EACCES;
39141+
39142 f_modown(filp, pid, type, force);
39143 return 0;
39144 }
39145@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned in
39146 switch (cmd) {
39147 case F_DUPFD:
39148 case F_DUPFD_CLOEXEC:
39149+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
39150 if (arg >= rlimit(RLIMIT_NOFILE))
39151 break;
39152 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
39153@@ -835,14 +841,14 @@ static int __init fcntl_init(void)
39154 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
39155 * is defined as O_NONBLOCK on some platforms and not on others.
39156 */
39157- BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
39158+ BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
39159 O_RDONLY | O_WRONLY | O_RDWR |
39160 O_CREAT | O_EXCL | O_NOCTTY |
39161 O_TRUNC | O_APPEND | /* O_NONBLOCK | */
39162 __O_SYNC | O_DSYNC | FASYNC |
39163 O_DIRECT | O_LARGEFILE | O_DIRECTORY |
39164 O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
39165- __FMODE_EXEC | O_PATH
39166+ __FMODE_EXEC | O_PATH | FMODE_GREXEC
39167 ));
39168
39169 fasync_cache = kmem_cache_create("fasync_cache",
39170diff -urNp linux-3.0.4/fs/fifo.c linux-3.0.4/fs/fifo.c
39171--- linux-3.0.4/fs/fifo.c 2011-07-21 22:17:23.000000000 -0400
39172+++ linux-3.0.4/fs/fifo.c 2011-08-23 21:47:56.000000000 -0400
39173@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
39174 */
39175 filp->f_op = &read_pipefifo_fops;
39176 pipe->r_counter++;
39177- if (pipe->readers++ == 0)
39178+ if (atomic_inc_return(&pipe->readers) == 1)
39179 wake_up_partner(inode);
39180
39181- if (!pipe->writers) {
39182+ if (!atomic_read(&pipe->writers)) {
39183 if ((filp->f_flags & O_NONBLOCK)) {
39184 /* suppress POLLHUP until we have
39185 * seen a writer */
39186@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode
39187 * errno=ENXIO when there is no process reading the FIFO.
39188 */
39189 ret = -ENXIO;
39190- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
39191+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
39192 goto err;
39193
39194 filp->f_op = &write_pipefifo_fops;
39195 pipe->w_counter++;
39196- if (!pipe->writers++)
39197+ if (atomic_inc_return(&pipe->writers) == 1)
39198 wake_up_partner(inode);
39199
39200- if (!pipe->readers) {
39201+ if (!atomic_read(&pipe->readers)) {
39202 wait_for_partner(inode, &pipe->r_counter);
39203 if (signal_pending(current))
39204 goto err_wr;
39205@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode
39206 */
39207 filp->f_op = &rdwr_pipefifo_fops;
39208
39209- pipe->readers++;
39210- pipe->writers++;
39211+ atomic_inc(&pipe->readers);
39212+ atomic_inc(&pipe->writers);
39213 pipe->r_counter++;
39214 pipe->w_counter++;
39215- if (pipe->readers == 1 || pipe->writers == 1)
39216+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
39217 wake_up_partner(inode);
39218 break;
39219
39220@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode
39221 return 0;
39222
39223 err_rd:
39224- if (!--pipe->readers)
39225+ if (atomic_dec_and_test(&pipe->readers))
39226 wake_up_interruptible(&pipe->wait);
39227 ret = -ERESTARTSYS;
39228 goto err;
39229
39230 err_wr:
39231- if (!--pipe->writers)
39232+ if (atomic_dec_and_test(&pipe->writers))
39233 wake_up_interruptible(&pipe->wait);
39234 ret = -ERESTARTSYS;
39235 goto err;
39236
39237 err:
39238- if (!pipe->readers && !pipe->writers)
39239+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
39240 free_pipe_info(inode);
39241
39242 err_nocleanup:
39243diff -urNp linux-3.0.4/fs/file.c linux-3.0.4/fs/file.c
39244--- linux-3.0.4/fs/file.c 2011-07-21 22:17:23.000000000 -0400
39245+++ linux-3.0.4/fs/file.c 2011-08-23 21:48:14.000000000 -0400
39246@@ -15,6 +15,7 @@
39247 #include <linux/slab.h>
39248 #include <linux/vmalloc.h>
39249 #include <linux/file.h>
39250+#include <linux/security.h>
39251 #include <linux/fdtable.h>
39252 #include <linux/bitops.h>
39253 #include <linux/interrupt.h>
39254@@ -254,6 +255,7 @@ int expand_files(struct files_struct *fi
39255 * N.B. For clone tasks sharing a files structure, this test
39256 * will limit the total number of files that can be opened.
39257 */
39258+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
39259 if (nr >= rlimit(RLIMIT_NOFILE))
39260 return -EMFILE;
39261
39262diff -urNp linux-3.0.4/fs/filesystems.c linux-3.0.4/fs/filesystems.c
39263--- linux-3.0.4/fs/filesystems.c 2011-07-21 22:17:23.000000000 -0400
39264+++ linux-3.0.4/fs/filesystems.c 2011-08-23 21:48:14.000000000 -0400
39265@@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(con
39266 int len = dot ? dot - name : strlen(name);
39267
39268 fs = __get_fs_type(name, len);
39269+
39270+#ifdef CONFIG_GRKERNSEC_MODHARDEN
39271+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
39272+#else
39273 if (!fs && (request_module("%.*s", len, name) == 0))
39274+#endif
39275 fs = __get_fs_type(name, len);
39276
39277 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
39278diff -urNp linux-3.0.4/fs/fscache/cookie.c linux-3.0.4/fs/fscache/cookie.c
39279--- linux-3.0.4/fs/fscache/cookie.c 2011-07-21 22:17:23.000000000 -0400
39280+++ linux-3.0.4/fs/fscache/cookie.c 2011-08-23 21:47:56.000000000 -0400
39281@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
39282 parent ? (char *) parent->def->name : "<no-parent>",
39283 def->name, netfs_data);
39284
39285- fscache_stat(&fscache_n_acquires);
39286+ fscache_stat_unchecked(&fscache_n_acquires);
39287
39288 /* if there's no parent cookie, then we don't create one here either */
39289 if (!parent) {
39290- fscache_stat(&fscache_n_acquires_null);
39291+ fscache_stat_unchecked(&fscache_n_acquires_null);
39292 _leave(" [no parent]");
39293 return NULL;
39294 }
39295@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
39296 /* allocate and initialise a cookie */
39297 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
39298 if (!cookie) {
39299- fscache_stat(&fscache_n_acquires_oom);
39300+ fscache_stat_unchecked(&fscache_n_acquires_oom);
39301 _leave(" [ENOMEM]");
39302 return NULL;
39303 }
39304@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
39305
39306 switch (cookie->def->type) {
39307 case FSCACHE_COOKIE_TYPE_INDEX:
39308- fscache_stat(&fscache_n_cookie_index);
39309+ fscache_stat_unchecked(&fscache_n_cookie_index);
39310 break;
39311 case FSCACHE_COOKIE_TYPE_DATAFILE:
39312- fscache_stat(&fscache_n_cookie_data);
39313+ fscache_stat_unchecked(&fscache_n_cookie_data);
39314 break;
39315 default:
39316- fscache_stat(&fscache_n_cookie_special);
39317+ fscache_stat_unchecked(&fscache_n_cookie_special);
39318 break;
39319 }
39320
39321@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
39322 if (fscache_acquire_non_index_cookie(cookie) < 0) {
39323 atomic_dec(&parent->n_children);
39324 __fscache_cookie_put(cookie);
39325- fscache_stat(&fscache_n_acquires_nobufs);
39326+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
39327 _leave(" = NULL");
39328 return NULL;
39329 }
39330 }
39331
39332- fscache_stat(&fscache_n_acquires_ok);
39333+ fscache_stat_unchecked(&fscache_n_acquires_ok);
39334 _leave(" = %p", cookie);
39335 return cookie;
39336 }
39337@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
39338 cache = fscache_select_cache_for_object(cookie->parent);
39339 if (!cache) {
39340 up_read(&fscache_addremove_sem);
39341- fscache_stat(&fscache_n_acquires_no_cache);
39342+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
39343 _leave(" = -ENOMEDIUM [no cache]");
39344 return -ENOMEDIUM;
39345 }
39346@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
39347 object = cache->ops->alloc_object(cache, cookie);
39348 fscache_stat_d(&fscache_n_cop_alloc_object);
39349 if (IS_ERR(object)) {
39350- fscache_stat(&fscache_n_object_no_alloc);
39351+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
39352 ret = PTR_ERR(object);
39353 goto error;
39354 }
39355
39356- fscache_stat(&fscache_n_object_alloc);
39357+ fscache_stat_unchecked(&fscache_n_object_alloc);
39358
39359 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
39360
39361@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
39362 struct fscache_object *object;
39363 struct hlist_node *_p;
39364
39365- fscache_stat(&fscache_n_updates);
39366+ fscache_stat_unchecked(&fscache_n_updates);
39367
39368 if (!cookie) {
39369- fscache_stat(&fscache_n_updates_null);
39370+ fscache_stat_unchecked(&fscache_n_updates_null);
39371 _leave(" [no cookie]");
39372 return;
39373 }
39374@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
39375 struct fscache_object *object;
39376 unsigned long event;
39377
39378- fscache_stat(&fscache_n_relinquishes);
39379+ fscache_stat_unchecked(&fscache_n_relinquishes);
39380 if (retire)
39381- fscache_stat(&fscache_n_relinquishes_retire);
39382+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
39383
39384 if (!cookie) {
39385- fscache_stat(&fscache_n_relinquishes_null);
39386+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
39387 _leave(" [no cookie]");
39388 return;
39389 }
39390@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
39391
39392 /* wait for the cookie to finish being instantiated (or to fail) */
39393 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
39394- fscache_stat(&fscache_n_relinquishes_waitcrt);
39395+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
39396 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
39397 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
39398 }
39399diff -urNp linux-3.0.4/fs/fscache/internal.h linux-3.0.4/fs/fscache/internal.h
39400--- linux-3.0.4/fs/fscache/internal.h 2011-07-21 22:17:23.000000000 -0400
39401+++ linux-3.0.4/fs/fscache/internal.h 2011-08-23 21:47:56.000000000 -0400
39402@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
39403 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
39404 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
39405
39406-extern atomic_t fscache_n_op_pend;
39407-extern atomic_t fscache_n_op_run;
39408-extern atomic_t fscache_n_op_enqueue;
39409-extern atomic_t fscache_n_op_deferred_release;
39410-extern atomic_t fscache_n_op_release;
39411-extern atomic_t fscache_n_op_gc;
39412-extern atomic_t fscache_n_op_cancelled;
39413-extern atomic_t fscache_n_op_rejected;
39414-
39415-extern atomic_t fscache_n_attr_changed;
39416-extern atomic_t fscache_n_attr_changed_ok;
39417-extern atomic_t fscache_n_attr_changed_nobufs;
39418-extern atomic_t fscache_n_attr_changed_nomem;
39419-extern atomic_t fscache_n_attr_changed_calls;
39420-
39421-extern atomic_t fscache_n_allocs;
39422-extern atomic_t fscache_n_allocs_ok;
39423-extern atomic_t fscache_n_allocs_wait;
39424-extern atomic_t fscache_n_allocs_nobufs;
39425-extern atomic_t fscache_n_allocs_intr;
39426-extern atomic_t fscache_n_allocs_object_dead;
39427-extern atomic_t fscache_n_alloc_ops;
39428-extern atomic_t fscache_n_alloc_op_waits;
39429-
39430-extern atomic_t fscache_n_retrievals;
39431-extern atomic_t fscache_n_retrievals_ok;
39432-extern atomic_t fscache_n_retrievals_wait;
39433-extern atomic_t fscache_n_retrievals_nodata;
39434-extern atomic_t fscache_n_retrievals_nobufs;
39435-extern atomic_t fscache_n_retrievals_intr;
39436-extern atomic_t fscache_n_retrievals_nomem;
39437-extern atomic_t fscache_n_retrievals_object_dead;
39438-extern atomic_t fscache_n_retrieval_ops;
39439-extern atomic_t fscache_n_retrieval_op_waits;
39440-
39441-extern atomic_t fscache_n_stores;
39442-extern atomic_t fscache_n_stores_ok;
39443-extern atomic_t fscache_n_stores_again;
39444-extern atomic_t fscache_n_stores_nobufs;
39445-extern atomic_t fscache_n_stores_oom;
39446-extern atomic_t fscache_n_store_ops;
39447-extern atomic_t fscache_n_store_calls;
39448-extern atomic_t fscache_n_store_pages;
39449-extern atomic_t fscache_n_store_radix_deletes;
39450-extern atomic_t fscache_n_store_pages_over_limit;
39451-
39452-extern atomic_t fscache_n_store_vmscan_not_storing;
39453-extern atomic_t fscache_n_store_vmscan_gone;
39454-extern atomic_t fscache_n_store_vmscan_busy;
39455-extern atomic_t fscache_n_store_vmscan_cancelled;
39456-
39457-extern atomic_t fscache_n_marks;
39458-extern atomic_t fscache_n_uncaches;
39459-
39460-extern atomic_t fscache_n_acquires;
39461-extern atomic_t fscache_n_acquires_null;
39462-extern atomic_t fscache_n_acquires_no_cache;
39463-extern atomic_t fscache_n_acquires_ok;
39464-extern atomic_t fscache_n_acquires_nobufs;
39465-extern atomic_t fscache_n_acquires_oom;
39466-
39467-extern atomic_t fscache_n_updates;
39468-extern atomic_t fscache_n_updates_null;
39469-extern atomic_t fscache_n_updates_run;
39470-
39471-extern atomic_t fscache_n_relinquishes;
39472-extern atomic_t fscache_n_relinquishes_null;
39473-extern atomic_t fscache_n_relinquishes_waitcrt;
39474-extern atomic_t fscache_n_relinquishes_retire;
39475-
39476-extern atomic_t fscache_n_cookie_index;
39477-extern atomic_t fscache_n_cookie_data;
39478-extern atomic_t fscache_n_cookie_special;
39479-
39480-extern atomic_t fscache_n_object_alloc;
39481-extern atomic_t fscache_n_object_no_alloc;
39482-extern atomic_t fscache_n_object_lookups;
39483-extern atomic_t fscache_n_object_lookups_negative;
39484-extern atomic_t fscache_n_object_lookups_positive;
39485-extern atomic_t fscache_n_object_lookups_timed_out;
39486-extern atomic_t fscache_n_object_created;
39487-extern atomic_t fscache_n_object_avail;
39488-extern atomic_t fscache_n_object_dead;
39489-
39490-extern atomic_t fscache_n_checkaux_none;
39491-extern atomic_t fscache_n_checkaux_okay;
39492-extern atomic_t fscache_n_checkaux_update;
39493-extern atomic_t fscache_n_checkaux_obsolete;
39494+extern atomic_unchecked_t fscache_n_op_pend;
39495+extern atomic_unchecked_t fscache_n_op_run;
39496+extern atomic_unchecked_t fscache_n_op_enqueue;
39497+extern atomic_unchecked_t fscache_n_op_deferred_release;
39498+extern atomic_unchecked_t fscache_n_op_release;
39499+extern atomic_unchecked_t fscache_n_op_gc;
39500+extern atomic_unchecked_t fscache_n_op_cancelled;
39501+extern atomic_unchecked_t fscache_n_op_rejected;
39502+
39503+extern atomic_unchecked_t fscache_n_attr_changed;
39504+extern atomic_unchecked_t fscache_n_attr_changed_ok;
39505+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
39506+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
39507+extern atomic_unchecked_t fscache_n_attr_changed_calls;
39508+
39509+extern atomic_unchecked_t fscache_n_allocs;
39510+extern atomic_unchecked_t fscache_n_allocs_ok;
39511+extern atomic_unchecked_t fscache_n_allocs_wait;
39512+extern atomic_unchecked_t fscache_n_allocs_nobufs;
39513+extern atomic_unchecked_t fscache_n_allocs_intr;
39514+extern atomic_unchecked_t fscache_n_allocs_object_dead;
39515+extern atomic_unchecked_t fscache_n_alloc_ops;
39516+extern atomic_unchecked_t fscache_n_alloc_op_waits;
39517+
39518+extern atomic_unchecked_t fscache_n_retrievals;
39519+extern atomic_unchecked_t fscache_n_retrievals_ok;
39520+extern atomic_unchecked_t fscache_n_retrievals_wait;
39521+extern atomic_unchecked_t fscache_n_retrievals_nodata;
39522+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
39523+extern atomic_unchecked_t fscache_n_retrievals_intr;
39524+extern atomic_unchecked_t fscache_n_retrievals_nomem;
39525+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
39526+extern atomic_unchecked_t fscache_n_retrieval_ops;
39527+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
39528+
39529+extern atomic_unchecked_t fscache_n_stores;
39530+extern atomic_unchecked_t fscache_n_stores_ok;
39531+extern atomic_unchecked_t fscache_n_stores_again;
39532+extern atomic_unchecked_t fscache_n_stores_nobufs;
39533+extern atomic_unchecked_t fscache_n_stores_oom;
39534+extern atomic_unchecked_t fscache_n_store_ops;
39535+extern atomic_unchecked_t fscache_n_store_calls;
39536+extern atomic_unchecked_t fscache_n_store_pages;
39537+extern atomic_unchecked_t fscache_n_store_radix_deletes;
39538+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
39539+
39540+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
39541+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
39542+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
39543+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
39544+
39545+extern atomic_unchecked_t fscache_n_marks;
39546+extern atomic_unchecked_t fscache_n_uncaches;
39547+
39548+extern atomic_unchecked_t fscache_n_acquires;
39549+extern atomic_unchecked_t fscache_n_acquires_null;
39550+extern atomic_unchecked_t fscache_n_acquires_no_cache;
39551+extern atomic_unchecked_t fscache_n_acquires_ok;
39552+extern atomic_unchecked_t fscache_n_acquires_nobufs;
39553+extern atomic_unchecked_t fscache_n_acquires_oom;
39554+
39555+extern atomic_unchecked_t fscache_n_updates;
39556+extern atomic_unchecked_t fscache_n_updates_null;
39557+extern atomic_unchecked_t fscache_n_updates_run;
39558+
39559+extern atomic_unchecked_t fscache_n_relinquishes;
39560+extern atomic_unchecked_t fscache_n_relinquishes_null;
39561+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
39562+extern atomic_unchecked_t fscache_n_relinquishes_retire;
39563+
39564+extern atomic_unchecked_t fscache_n_cookie_index;
39565+extern atomic_unchecked_t fscache_n_cookie_data;
39566+extern atomic_unchecked_t fscache_n_cookie_special;
39567+
39568+extern atomic_unchecked_t fscache_n_object_alloc;
39569+extern atomic_unchecked_t fscache_n_object_no_alloc;
39570+extern atomic_unchecked_t fscache_n_object_lookups;
39571+extern atomic_unchecked_t fscache_n_object_lookups_negative;
39572+extern atomic_unchecked_t fscache_n_object_lookups_positive;
39573+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
39574+extern atomic_unchecked_t fscache_n_object_created;
39575+extern atomic_unchecked_t fscache_n_object_avail;
39576+extern atomic_unchecked_t fscache_n_object_dead;
39577+
39578+extern atomic_unchecked_t fscache_n_checkaux_none;
39579+extern atomic_unchecked_t fscache_n_checkaux_okay;
39580+extern atomic_unchecked_t fscache_n_checkaux_update;
39581+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
39582
39583 extern atomic_t fscache_n_cop_alloc_object;
39584 extern atomic_t fscache_n_cop_lookup_object;
39585@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t
39586 atomic_inc(stat);
39587 }
39588
39589+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
39590+{
39591+ atomic_inc_unchecked(stat);
39592+}
39593+
39594 static inline void fscache_stat_d(atomic_t *stat)
39595 {
39596 atomic_dec(stat);
39597@@ -267,6 +272,7 @@ extern const struct file_operations fsca
39598
39599 #define __fscache_stat(stat) (NULL)
39600 #define fscache_stat(stat) do {} while (0)
39601+#define fscache_stat_unchecked(stat) do {} while (0)
39602 #define fscache_stat_d(stat) do {} while (0)
39603 #endif
39604
39605diff -urNp linux-3.0.4/fs/fscache/object.c linux-3.0.4/fs/fscache/object.c
39606--- linux-3.0.4/fs/fscache/object.c 2011-07-21 22:17:23.000000000 -0400
39607+++ linux-3.0.4/fs/fscache/object.c 2011-08-23 21:47:56.000000000 -0400
39608@@ -128,7 +128,7 @@ static void fscache_object_state_machine
39609 /* update the object metadata on disk */
39610 case FSCACHE_OBJECT_UPDATING:
39611 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
39612- fscache_stat(&fscache_n_updates_run);
39613+ fscache_stat_unchecked(&fscache_n_updates_run);
39614 fscache_stat(&fscache_n_cop_update_object);
39615 object->cache->ops->update_object(object);
39616 fscache_stat_d(&fscache_n_cop_update_object);
39617@@ -217,7 +217,7 @@ static void fscache_object_state_machine
39618 spin_lock(&object->lock);
39619 object->state = FSCACHE_OBJECT_DEAD;
39620 spin_unlock(&object->lock);
39621- fscache_stat(&fscache_n_object_dead);
39622+ fscache_stat_unchecked(&fscache_n_object_dead);
39623 goto terminal_transit;
39624
39625 /* handle the parent cache of this object being withdrawn from
39626@@ -232,7 +232,7 @@ static void fscache_object_state_machine
39627 spin_lock(&object->lock);
39628 object->state = FSCACHE_OBJECT_DEAD;
39629 spin_unlock(&object->lock);
39630- fscache_stat(&fscache_n_object_dead);
39631+ fscache_stat_unchecked(&fscache_n_object_dead);
39632 goto terminal_transit;
39633
39634 /* complain about the object being woken up once it is
39635@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct
39636 parent->cookie->def->name, cookie->def->name,
39637 object->cache->tag->name);
39638
39639- fscache_stat(&fscache_n_object_lookups);
39640+ fscache_stat_unchecked(&fscache_n_object_lookups);
39641 fscache_stat(&fscache_n_cop_lookup_object);
39642 ret = object->cache->ops->lookup_object(object);
39643 fscache_stat_d(&fscache_n_cop_lookup_object);
39644@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct
39645 if (ret == -ETIMEDOUT) {
39646 /* probably stuck behind another object, so move this one to
39647 * the back of the queue */
39648- fscache_stat(&fscache_n_object_lookups_timed_out);
39649+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
39650 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
39651 }
39652
39653@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(stru
39654
39655 spin_lock(&object->lock);
39656 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
39657- fscache_stat(&fscache_n_object_lookups_negative);
39658+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
39659
39660 /* transit here to allow write requests to begin stacking up
39661 * and read requests to begin returning ENODATA */
39662@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fsca
39663 * result, in which case there may be data available */
39664 spin_lock(&object->lock);
39665 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
39666- fscache_stat(&fscache_n_object_lookups_positive);
39667+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
39668
39669 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
39670
39671@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fsca
39672 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
39673 } else {
39674 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
39675- fscache_stat(&fscache_n_object_created);
39676+ fscache_stat_unchecked(&fscache_n_object_created);
39677
39678 object->state = FSCACHE_OBJECT_AVAILABLE;
39679 spin_unlock(&object->lock);
39680@@ -602,7 +602,7 @@ static void fscache_object_available(str
39681 fscache_enqueue_dependents(object);
39682
39683 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
39684- fscache_stat(&fscache_n_object_avail);
39685+ fscache_stat_unchecked(&fscache_n_object_avail);
39686
39687 _leave("");
39688 }
39689@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
39690 enum fscache_checkaux result;
39691
39692 if (!object->cookie->def->check_aux) {
39693- fscache_stat(&fscache_n_checkaux_none);
39694+ fscache_stat_unchecked(&fscache_n_checkaux_none);
39695 return FSCACHE_CHECKAUX_OKAY;
39696 }
39697
39698@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
39699 switch (result) {
39700 /* entry okay as is */
39701 case FSCACHE_CHECKAUX_OKAY:
39702- fscache_stat(&fscache_n_checkaux_okay);
39703+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
39704 break;
39705
39706 /* entry requires update */
39707 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
39708- fscache_stat(&fscache_n_checkaux_update);
39709+ fscache_stat_unchecked(&fscache_n_checkaux_update);
39710 break;
39711
39712 /* entry requires deletion */
39713 case FSCACHE_CHECKAUX_OBSOLETE:
39714- fscache_stat(&fscache_n_checkaux_obsolete);
39715+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
39716 break;
39717
39718 default:
39719diff -urNp linux-3.0.4/fs/fscache/operation.c linux-3.0.4/fs/fscache/operation.c
39720--- linux-3.0.4/fs/fscache/operation.c 2011-07-21 22:17:23.000000000 -0400
39721+++ linux-3.0.4/fs/fscache/operation.c 2011-08-23 21:47:56.000000000 -0400
39722@@ -17,7 +17,7 @@
39723 #include <linux/slab.h>
39724 #include "internal.h"
39725
39726-atomic_t fscache_op_debug_id;
39727+atomic_unchecked_t fscache_op_debug_id;
39728 EXPORT_SYMBOL(fscache_op_debug_id);
39729
39730 /**
39731@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fs
39732 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
39733 ASSERTCMP(atomic_read(&op->usage), >, 0);
39734
39735- fscache_stat(&fscache_n_op_enqueue);
39736+ fscache_stat_unchecked(&fscache_n_op_enqueue);
39737 switch (op->flags & FSCACHE_OP_TYPE) {
39738 case FSCACHE_OP_ASYNC:
39739 _debug("queue async");
39740@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscach
39741 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
39742 if (op->processor)
39743 fscache_enqueue_operation(op);
39744- fscache_stat(&fscache_n_op_run);
39745+ fscache_stat_unchecked(&fscache_n_op_run);
39746 }
39747
39748 /*
39749@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct f
39750 if (object->n_ops > 1) {
39751 atomic_inc(&op->usage);
39752 list_add_tail(&op->pend_link, &object->pending_ops);
39753- fscache_stat(&fscache_n_op_pend);
39754+ fscache_stat_unchecked(&fscache_n_op_pend);
39755 } else if (!list_empty(&object->pending_ops)) {
39756 atomic_inc(&op->usage);
39757 list_add_tail(&op->pend_link, &object->pending_ops);
39758- fscache_stat(&fscache_n_op_pend);
39759+ fscache_stat_unchecked(&fscache_n_op_pend);
39760 fscache_start_operations(object);
39761 } else {
39762 ASSERTCMP(object->n_in_progress, ==, 0);
39763@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct f
39764 object->n_exclusive++; /* reads and writes must wait */
39765 atomic_inc(&op->usage);
39766 list_add_tail(&op->pend_link, &object->pending_ops);
39767- fscache_stat(&fscache_n_op_pend);
39768+ fscache_stat_unchecked(&fscache_n_op_pend);
39769 ret = 0;
39770 } else {
39771 /* not allowed to submit ops in any other state */
39772@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_obj
39773 if (object->n_exclusive > 0) {
39774 atomic_inc(&op->usage);
39775 list_add_tail(&op->pend_link, &object->pending_ops);
39776- fscache_stat(&fscache_n_op_pend);
39777+ fscache_stat_unchecked(&fscache_n_op_pend);
39778 } else if (!list_empty(&object->pending_ops)) {
39779 atomic_inc(&op->usage);
39780 list_add_tail(&op->pend_link, &object->pending_ops);
39781- fscache_stat(&fscache_n_op_pend);
39782+ fscache_stat_unchecked(&fscache_n_op_pend);
39783 fscache_start_operations(object);
39784 } else {
39785 ASSERTCMP(object->n_exclusive, ==, 0);
39786@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_obj
39787 object->n_ops++;
39788 atomic_inc(&op->usage);
39789 list_add_tail(&op->pend_link, &object->pending_ops);
39790- fscache_stat(&fscache_n_op_pend);
39791+ fscache_stat_unchecked(&fscache_n_op_pend);
39792 ret = 0;
39793 } else if (object->state == FSCACHE_OBJECT_DYING ||
39794 object->state == FSCACHE_OBJECT_LC_DYING ||
39795 object->state == FSCACHE_OBJECT_WITHDRAWING) {
39796- fscache_stat(&fscache_n_op_rejected);
39797+ fscache_stat_unchecked(&fscache_n_op_rejected);
39798 ret = -ENOBUFS;
39799 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
39800 fscache_report_unexpected_submission(object, op, ostate);
39801@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_ope
39802
39803 ret = -EBUSY;
39804 if (!list_empty(&op->pend_link)) {
39805- fscache_stat(&fscache_n_op_cancelled);
39806+ fscache_stat_unchecked(&fscache_n_op_cancelled);
39807 list_del_init(&op->pend_link);
39808 object->n_ops--;
39809 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
39810@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscach
39811 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
39812 BUG();
39813
39814- fscache_stat(&fscache_n_op_release);
39815+ fscache_stat_unchecked(&fscache_n_op_release);
39816
39817 if (op->release) {
39818 op->release(op);
39819@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscach
39820 * lock, and defer it otherwise */
39821 if (!spin_trylock(&object->lock)) {
39822 _debug("defer put");
39823- fscache_stat(&fscache_n_op_deferred_release);
39824+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
39825
39826 cache = object->cache;
39827 spin_lock(&cache->op_gc_list_lock);
39828@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_st
39829
39830 _debug("GC DEFERRED REL OBJ%x OP%x",
39831 object->debug_id, op->debug_id);
39832- fscache_stat(&fscache_n_op_gc);
39833+ fscache_stat_unchecked(&fscache_n_op_gc);
39834
39835 ASSERTCMP(atomic_read(&op->usage), ==, 0);
39836
39837diff -urNp linux-3.0.4/fs/fscache/page.c linux-3.0.4/fs/fscache/page.c
39838--- linux-3.0.4/fs/fscache/page.c 2011-07-21 22:17:23.000000000 -0400
39839+++ linux-3.0.4/fs/fscache/page.c 2011-08-23 21:47:56.000000000 -0400
39840@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct
39841 val = radix_tree_lookup(&cookie->stores, page->index);
39842 if (!val) {
39843 rcu_read_unlock();
39844- fscache_stat(&fscache_n_store_vmscan_not_storing);
39845+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
39846 __fscache_uncache_page(cookie, page);
39847 return true;
39848 }
39849@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct
39850 spin_unlock(&cookie->stores_lock);
39851
39852 if (xpage) {
39853- fscache_stat(&fscache_n_store_vmscan_cancelled);
39854- fscache_stat(&fscache_n_store_radix_deletes);
39855+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
39856+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
39857 ASSERTCMP(xpage, ==, page);
39858 } else {
39859- fscache_stat(&fscache_n_store_vmscan_gone);
39860+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
39861 }
39862
39863 wake_up_bit(&cookie->flags, 0);
39864@@ -107,7 +107,7 @@ page_busy:
39865 /* we might want to wait here, but that could deadlock the allocator as
39866 * the work threads writing to the cache may all end up sleeping
39867 * on memory allocation */
39868- fscache_stat(&fscache_n_store_vmscan_busy);
39869+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
39870 return false;
39871 }
39872 EXPORT_SYMBOL(__fscache_maybe_release_page);
39873@@ -131,7 +131,7 @@ static void fscache_end_page_write(struc
39874 FSCACHE_COOKIE_STORING_TAG);
39875 if (!radix_tree_tag_get(&cookie->stores, page->index,
39876 FSCACHE_COOKIE_PENDING_TAG)) {
39877- fscache_stat(&fscache_n_store_radix_deletes);
39878+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
39879 xpage = radix_tree_delete(&cookie->stores, page->index);
39880 }
39881 spin_unlock(&cookie->stores_lock);
39882@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(stru
39883
39884 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
39885
39886- fscache_stat(&fscache_n_attr_changed_calls);
39887+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
39888
39889 if (fscache_object_is_active(object)) {
39890 fscache_stat(&fscache_n_cop_attr_changed);
39891@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscach
39892
39893 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
39894
39895- fscache_stat(&fscache_n_attr_changed);
39896+ fscache_stat_unchecked(&fscache_n_attr_changed);
39897
39898 op = kzalloc(sizeof(*op), GFP_KERNEL);
39899 if (!op) {
39900- fscache_stat(&fscache_n_attr_changed_nomem);
39901+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
39902 _leave(" = -ENOMEM");
39903 return -ENOMEM;
39904 }
39905@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscach
39906 if (fscache_submit_exclusive_op(object, op) < 0)
39907 goto nobufs;
39908 spin_unlock(&cookie->lock);
39909- fscache_stat(&fscache_n_attr_changed_ok);
39910+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
39911 fscache_put_operation(op);
39912 _leave(" = 0");
39913 return 0;
39914@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscach
39915 nobufs:
39916 spin_unlock(&cookie->lock);
39917 kfree(op);
39918- fscache_stat(&fscache_n_attr_changed_nobufs);
39919+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
39920 _leave(" = %d", -ENOBUFS);
39921 return -ENOBUFS;
39922 }
39923@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache
39924 /* allocate a retrieval operation and attempt to submit it */
39925 op = kzalloc(sizeof(*op), GFP_NOIO);
39926 if (!op) {
39927- fscache_stat(&fscache_n_retrievals_nomem);
39928+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39929 return NULL;
39930 }
39931
39932@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_loo
39933 return 0;
39934 }
39935
39936- fscache_stat(&fscache_n_retrievals_wait);
39937+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
39938
39939 jif = jiffies;
39940 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
39941 fscache_wait_bit_interruptible,
39942 TASK_INTERRUPTIBLE) != 0) {
39943- fscache_stat(&fscache_n_retrievals_intr);
39944+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
39945 _leave(" = -ERESTARTSYS");
39946 return -ERESTARTSYS;
39947 }
39948@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_loo
39949 */
39950 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
39951 struct fscache_retrieval *op,
39952- atomic_t *stat_op_waits,
39953- atomic_t *stat_object_dead)
39954+ atomic_unchecked_t *stat_op_waits,
39955+ atomic_unchecked_t *stat_object_dead)
39956 {
39957 int ret;
39958
39959@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_ac
39960 goto check_if_dead;
39961
39962 _debug(">>> WT");
39963- fscache_stat(stat_op_waits);
39964+ fscache_stat_unchecked(stat_op_waits);
39965 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
39966 fscache_wait_bit_interruptible,
39967 TASK_INTERRUPTIBLE) < 0) {
39968@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_ac
39969
39970 check_if_dead:
39971 if (unlikely(fscache_object_is_dead(object))) {
39972- fscache_stat(stat_object_dead);
39973+ fscache_stat_unchecked(stat_object_dead);
39974 return -ENOBUFS;
39975 }
39976 return 0;
39977@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct
39978
39979 _enter("%p,%p,,,", cookie, page);
39980
39981- fscache_stat(&fscache_n_retrievals);
39982+ fscache_stat_unchecked(&fscache_n_retrievals);
39983
39984 if (hlist_empty(&cookie->backing_objects))
39985 goto nobufs;
39986@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct
39987 goto nobufs_unlock;
39988 spin_unlock(&cookie->lock);
39989
39990- fscache_stat(&fscache_n_retrieval_ops);
39991+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
39992
39993 /* pin the netfs read context in case we need to do the actual netfs
39994 * read because we've encountered a cache read failure */
39995@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct
39996
39997 error:
39998 if (ret == -ENOMEM)
39999- fscache_stat(&fscache_n_retrievals_nomem);
40000+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
40001 else if (ret == -ERESTARTSYS)
40002- fscache_stat(&fscache_n_retrievals_intr);
40003+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
40004 else if (ret == -ENODATA)
40005- fscache_stat(&fscache_n_retrievals_nodata);
40006+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
40007 else if (ret < 0)
40008- fscache_stat(&fscache_n_retrievals_nobufs);
40009+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
40010 else
40011- fscache_stat(&fscache_n_retrievals_ok);
40012+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
40013
40014 fscache_put_retrieval(op);
40015 _leave(" = %d", ret);
40016@@ -429,7 +429,7 @@ nobufs_unlock:
40017 spin_unlock(&cookie->lock);
40018 kfree(op);
40019 nobufs:
40020- fscache_stat(&fscache_n_retrievals_nobufs);
40021+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
40022 _leave(" = -ENOBUFS");
40023 return -ENOBUFS;
40024 }
40025@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct
40026
40027 _enter("%p,,%d,,,", cookie, *nr_pages);
40028
40029- fscache_stat(&fscache_n_retrievals);
40030+ fscache_stat_unchecked(&fscache_n_retrievals);
40031
40032 if (hlist_empty(&cookie->backing_objects))
40033 goto nobufs;
40034@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct
40035 goto nobufs_unlock;
40036 spin_unlock(&cookie->lock);
40037
40038- fscache_stat(&fscache_n_retrieval_ops);
40039+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
40040
40041 /* pin the netfs read context in case we need to do the actual netfs
40042 * read because we've encountered a cache read failure */
40043@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct
40044
40045 error:
40046 if (ret == -ENOMEM)
40047- fscache_stat(&fscache_n_retrievals_nomem);
40048+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
40049 else if (ret == -ERESTARTSYS)
40050- fscache_stat(&fscache_n_retrievals_intr);
40051+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
40052 else if (ret == -ENODATA)
40053- fscache_stat(&fscache_n_retrievals_nodata);
40054+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
40055 else if (ret < 0)
40056- fscache_stat(&fscache_n_retrievals_nobufs);
40057+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
40058 else
40059- fscache_stat(&fscache_n_retrievals_ok);
40060+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
40061
40062 fscache_put_retrieval(op);
40063 _leave(" = %d", ret);
40064@@ -545,7 +545,7 @@ nobufs_unlock:
40065 spin_unlock(&cookie->lock);
40066 kfree(op);
40067 nobufs:
40068- fscache_stat(&fscache_n_retrievals_nobufs);
40069+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
40070 _leave(" = -ENOBUFS");
40071 return -ENOBUFS;
40072 }
40073@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_
40074
40075 _enter("%p,%p,,,", cookie, page);
40076
40077- fscache_stat(&fscache_n_allocs);
40078+ fscache_stat_unchecked(&fscache_n_allocs);
40079
40080 if (hlist_empty(&cookie->backing_objects))
40081 goto nobufs;
40082@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_
40083 goto nobufs_unlock;
40084 spin_unlock(&cookie->lock);
40085
40086- fscache_stat(&fscache_n_alloc_ops);
40087+ fscache_stat_unchecked(&fscache_n_alloc_ops);
40088
40089 ret = fscache_wait_for_retrieval_activation(
40090 object, op,
40091@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_
40092
40093 error:
40094 if (ret == -ERESTARTSYS)
40095- fscache_stat(&fscache_n_allocs_intr);
40096+ fscache_stat_unchecked(&fscache_n_allocs_intr);
40097 else if (ret < 0)
40098- fscache_stat(&fscache_n_allocs_nobufs);
40099+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
40100 else
40101- fscache_stat(&fscache_n_allocs_ok);
40102+ fscache_stat_unchecked(&fscache_n_allocs_ok);
40103
40104 fscache_put_retrieval(op);
40105 _leave(" = %d", ret);
40106@@ -625,7 +625,7 @@ nobufs_unlock:
40107 spin_unlock(&cookie->lock);
40108 kfree(op);
40109 nobufs:
40110- fscache_stat(&fscache_n_allocs_nobufs);
40111+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
40112 _leave(" = -ENOBUFS");
40113 return -ENOBUFS;
40114 }
40115@@ -666,7 +666,7 @@ static void fscache_write_op(struct fsca
40116
40117 spin_lock(&cookie->stores_lock);
40118
40119- fscache_stat(&fscache_n_store_calls);
40120+ fscache_stat_unchecked(&fscache_n_store_calls);
40121
40122 /* find a page to store */
40123 page = NULL;
40124@@ -677,7 +677,7 @@ static void fscache_write_op(struct fsca
40125 page = results[0];
40126 _debug("gang %d [%lx]", n, page->index);
40127 if (page->index > op->store_limit) {
40128- fscache_stat(&fscache_n_store_pages_over_limit);
40129+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
40130 goto superseded;
40131 }
40132
40133@@ -689,7 +689,7 @@ static void fscache_write_op(struct fsca
40134 spin_unlock(&cookie->stores_lock);
40135 spin_unlock(&object->lock);
40136
40137- fscache_stat(&fscache_n_store_pages);
40138+ fscache_stat_unchecked(&fscache_n_store_pages);
40139 fscache_stat(&fscache_n_cop_write_page);
40140 ret = object->cache->ops->write_page(op, page);
40141 fscache_stat_d(&fscache_n_cop_write_page);
40142@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_
40143 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
40144 ASSERT(PageFsCache(page));
40145
40146- fscache_stat(&fscache_n_stores);
40147+ fscache_stat_unchecked(&fscache_n_stores);
40148
40149 op = kzalloc(sizeof(*op), GFP_NOIO);
40150 if (!op)
40151@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_
40152 spin_unlock(&cookie->stores_lock);
40153 spin_unlock(&object->lock);
40154
40155- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
40156+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
40157 op->store_limit = object->store_limit;
40158
40159 if (fscache_submit_op(object, &op->op) < 0)
40160@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_
40161
40162 spin_unlock(&cookie->lock);
40163 radix_tree_preload_end();
40164- fscache_stat(&fscache_n_store_ops);
40165- fscache_stat(&fscache_n_stores_ok);
40166+ fscache_stat_unchecked(&fscache_n_store_ops);
40167+ fscache_stat_unchecked(&fscache_n_stores_ok);
40168
40169 /* the work queue now carries its own ref on the object */
40170 fscache_put_operation(&op->op);
40171@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_
40172 return 0;
40173
40174 already_queued:
40175- fscache_stat(&fscache_n_stores_again);
40176+ fscache_stat_unchecked(&fscache_n_stores_again);
40177 already_pending:
40178 spin_unlock(&cookie->stores_lock);
40179 spin_unlock(&object->lock);
40180 spin_unlock(&cookie->lock);
40181 radix_tree_preload_end();
40182 kfree(op);
40183- fscache_stat(&fscache_n_stores_ok);
40184+ fscache_stat_unchecked(&fscache_n_stores_ok);
40185 _leave(" = 0");
40186 return 0;
40187
40188@@ -851,14 +851,14 @@ nobufs:
40189 spin_unlock(&cookie->lock);
40190 radix_tree_preload_end();
40191 kfree(op);
40192- fscache_stat(&fscache_n_stores_nobufs);
40193+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
40194 _leave(" = -ENOBUFS");
40195 return -ENOBUFS;
40196
40197 nomem_free:
40198 kfree(op);
40199 nomem:
40200- fscache_stat(&fscache_n_stores_oom);
40201+ fscache_stat_unchecked(&fscache_n_stores_oom);
40202 _leave(" = -ENOMEM");
40203 return -ENOMEM;
40204 }
40205@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscac
40206 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
40207 ASSERTCMP(page, !=, NULL);
40208
40209- fscache_stat(&fscache_n_uncaches);
40210+ fscache_stat_unchecked(&fscache_n_uncaches);
40211
40212 /* cache withdrawal may beat us to it */
40213 if (!PageFsCache(page))
40214@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fs
40215 unsigned long loop;
40216
40217 #ifdef CONFIG_FSCACHE_STATS
40218- atomic_add(pagevec->nr, &fscache_n_marks);
40219+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
40220 #endif
40221
40222 for (loop = 0; loop < pagevec->nr; loop++) {
40223diff -urNp linux-3.0.4/fs/fscache/stats.c linux-3.0.4/fs/fscache/stats.c
40224--- linux-3.0.4/fs/fscache/stats.c 2011-07-21 22:17:23.000000000 -0400
40225+++ linux-3.0.4/fs/fscache/stats.c 2011-08-23 21:47:56.000000000 -0400
40226@@ -18,95 +18,95 @@
40227 /*
40228 * operation counters
40229 */
40230-atomic_t fscache_n_op_pend;
40231-atomic_t fscache_n_op_run;
40232-atomic_t fscache_n_op_enqueue;
40233-atomic_t fscache_n_op_requeue;
40234-atomic_t fscache_n_op_deferred_release;
40235-atomic_t fscache_n_op_release;
40236-atomic_t fscache_n_op_gc;
40237-atomic_t fscache_n_op_cancelled;
40238-atomic_t fscache_n_op_rejected;
40239-
40240-atomic_t fscache_n_attr_changed;
40241-atomic_t fscache_n_attr_changed_ok;
40242-atomic_t fscache_n_attr_changed_nobufs;
40243-atomic_t fscache_n_attr_changed_nomem;
40244-atomic_t fscache_n_attr_changed_calls;
40245-
40246-atomic_t fscache_n_allocs;
40247-atomic_t fscache_n_allocs_ok;
40248-atomic_t fscache_n_allocs_wait;
40249-atomic_t fscache_n_allocs_nobufs;
40250-atomic_t fscache_n_allocs_intr;
40251-atomic_t fscache_n_allocs_object_dead;
40252-atomic_t fscache_n_alloc_ops;
40253-atomic_t fscache_n_alloc_op_waits;
40254-
40255-atomic_t fscache_n_retrievals;
40256-atomic_t fscache_n_retrievals_ok;
40257-atomic_t fscache_n_retrievals_wait;
40258-atomic_t fscache_n_retrievals_nodata;
40259-atomic_t fscache_n_retrievals_nobufs;
40260-atomic_t fscache_n_retrievals_intr;
40261-atomic_t fscache_n_retrievals_nomem;
40262-atomic_t fscache_n_retrievals_object_dead;
40263-atomic_t fscache_n_retrieval_ops;
40264-atomic_t fscache_n_retrieval_op_waits;
40265-
40266-atomic_t fscache_n_stores;
40267-atomic_t fscache_n_stores_ok;
40268-atomic_t fscache_n_stores_again;
40269-atomic_t fscache_n_stores_nobufs;
40270-atomic_t fscache_n_stores_oom;
40271-atomic_t fscache_n_store_ops;
40272-atomic_t fscache_n_store_calls;
40273-atomic_t fscache_n_store_pages;
40274-atomic_t fscache_n_store_radix_deletes;
40275-atomic_t fscache_n_store_pages_over_limit;
40276-
40277-atomic_t fscache_n_store_vmscan_not_storing;
40278-atomic_t fscache_n_store_vmscan_gone;
40279-atomic_t fscache_n_store_vmscan_busy;
40280-atomic_t fscache_n_store_vmscan_cancelled;
40281-
40282-atomic_t fscache_n_marks;
40283-atomic_t fscache_n_uncaches;
40284-
40285-atomic_t fscache_n_acquires;
40286-atomic_t fscache_n_acquires_null;
40287-atomic_t fscache_n_acquires_no_cache;
40288-atomic_t fscache_n_acquires_ok;
40289-atomic_t fscache_n_acquires_nobufs;
40290-atomic_t fscache_n_acquires_oom;
40291-
40292-atomic_t fscache_n_updates;
40293-atomic_t fscache_n_updates_null;
40294-atomic_t fscache_n_updates_run;
40295-
40296-atomic_t fscache_n_relinquishes;
40297-atomic_t fscache_n_relinquishes_null;
40298-atomic_t fscache_n_relinquishes_waitcrt;
40299-atomic_t fscache_n_relinquishes_retire;
40300-
40301-atomic_t fscache_n_cookie_index;
40302-atomic_t fscache_n_cookie_data;
40303-atomic_t fscache_n_cookie_special;
40304-
40305-atomic_t fscache_n_object_alloc;
40306-atomic_t fscache_n_object_no_alloc;
40307-atomic_t fscache_n_object_lookups;
40308-atomic_t fscache_n_object_lookups_negative;
40309-atomic_t fscache_n_object_lookups_positive;
40310-atomic_t fscache_n_object_lookups_timed_out;
40311-atomic_t fscache_n_object_created;
40312-atomic_t fscache_n_object_avail;
40313-atomic_t fscache_n_object_dead;
40314-
40315-atomic_t fscache_n_checkaux_none;
40316-atomic_t fscache_n_checkaux_okay;
40317-atomic_t fscache_n_checkaux_update;
40318-atomic_t fscache_n_checkaux_obsolete;
40319+atomic_unchecked_t fscache_n_op_pend;
40320+atomic_unchecked_t fscache_n_op_run;
40321+atomic_unchecked_t fscache_n_op_enqueue;
40322+atomic_unchecked_t fscache_n_op_requeue;
40323+atomic_unchecked_t fscache_n_op_deferred_release;
40324+atomic_unchecked_t fscache_n_op_release;
40325+atomic_unchecked_t fscache_n_op_gc;
40326+atomic_unchecked_t fscache_n_op_cancelled;
40327+atomic_unchecked_t fscache_n_op_rejected;
40328+
40329+atomic_unchecked_t fscache_n_attr_changed;
40330+atomic_unchecked_t fscache_n_attr_changed_ok;
40331+atomic_unchecked_t fscache_n_attr_changed_nobufs;
40332+atomic_unchecked_t fscache_n_attr_changed_nomem;
40333+atomic_unchecked_t fscache_n_attr_changed_calls;
40334+
40335+atomic_unchecked_t fscache_n_allocs;
40336+atomic_unchecked_t fscache_n_allocs_ok;
40337+atomic_unchecked_t fscache_n_allocs_wait;
40338+atomic_unchecked_t fscache_n_allocs_nobufs;
40339+atomic_unchecked_t fscache_n_allocs_intr;
40340+atomic_unchecked_t fscache_n_allocs_object_dead;
40341+atomic_unchecked_t fscache_n_alloc_ops;
40342+atomic_unchecked_t fscache_n_alloc_op_waits;
40343+
40344+atomic_unchecked_t fscache_n_retrievals;
40345+atomic_unchecked_t fscache_n_retrievals_ok;
40346+atomic_unchecked_t fscache_n_retrievals_wait;
40347+atomic_unchecked_t fscache_n_retrievals_nodata;
40348+atomic_unchecked_t fscache_n_retrievals_nobufs;
40349+atomic_unchecked_t fscache_n_retrievals_intr;
40350+atomic_unchecked_t fscache_n_retrievals_nomem;
40351+atomic_unchecked_t fscache_n_retrievals_object_dead;
40352+atomic_unchecked_t fscache_n_retrieval_ops;
40353+atomic_unchecked_t fscache_n_retrieval_op_waits;
40354+
40355+atomic_unchecked_t fscache_n_stores;
40356+atomic_unchecked_t fscache_n_stores_ok;
40357+atomic_unchecked_t fscache_n_stores_again;
40358+atomic_unchecked_t fscache_n_stores_nobufs;
40359+atomic_unchecked_t fscache_n_stores_oom;
40360+atomic_unchecked_t fscache_n_store_ops;
40361+atomic_unchecked_t fscache_n_store_calls;
40362+atomic_unchecked_t fscache_n_store_pages;
40363+atomic_unchecked_t fscache_n_store_radix_deletes;
40364+atomic_unchecked_t fscache_n_store_pages_over_limit;
40365+
40366+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
40367+atomic_unchecked_t fscache_n_store_vmscan_gone;
40368+atomic_unchecked_t fscache_n_store_vmscan_busy;
40369+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
40370+
40371+atomic_unchecked_t fscache_n_marks;
40372+atomic_unchecked_t fscache_n_uncaches;
40373+
40374+atomic_unchecked_t fscache_n_acquires;
40375+atomic_unchecked_t fscache_n_acquires_null;
40376+atomic_unchecked_t fscache_n_acquires_no_cache;
40377+atomic_unchecked_t fscache_n_acquires_ok;
40378+atomic_unchecked_t fscache_n_acquires_nobufs;
40379+atomic_unchecked_t fscache_n_acquires_oom;
40380+
40381+atomic_unchecked_t fscache_n_updates;
40382+atomic_unchecked_t fscache_n_updates_null;
40383+atomic_unchecked_t fscache_n_updates_run;
40384+
40385+atomic_unchecked_t fscache_n_relinquishes;
40386+atomic_unchecked_t fscache_n_relinquishes_null;
40387+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
40388+atomic_unchecked_t fscache_n_relinquishes_retire;
40389+
40390+atomic_unchecked_t fscache_n_cookie_index;
40391+atomic_unchecked_t fscache_n_cookie_data;
40392+atomic_unchecked_t fscache_n_cookie_special;
40393+
40394+atomic_unchecked_t fscache_n_object_alloc;
40395+atomic_unchecked_t fscache_n_object_no_alloc;
40396+atomic_unchecked_t fscache_n_object_lookups;
40397+atomic_unchecked_t fscache_n_object_lookups_negative;
40398+atomic_unchecked_t fscache_n_object_lookups_positive;
40399+atomic_unchecked_t fscache_n_object_lookups_timed_out;
40400+atomic_unchecked_t fscache_n_object_created;
40401+atomic_unchecked_t fscache_n_object_avail;
40402+atomic_unchecked_t fscache_n_object_dead;
40403+
40404+atomic_unchecked_t fscache_n_checkaux_none;
40405+atomic_unchecked_t fscache_n_checkaux_okay;
40406+atomic_unchecked_t fscache_n_checkaux_update;
40407+atomic_unchecked_t fscache_n_checkaux_obsolete;
40408
40409 atomic_t fscache_n_cop_alloc_object;
40410 atomic_t fscache_n_cop_lookup_object;
40411@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
40412 seq_puts(m, "FS-Cache statistics\n");
40413
40414 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
40415- atomic_read(&fscache_n_cookie_index),
40416- atomic_read(&fscache_n_cookie_data),
40417- atomic_read(&fscache_n_cookie_special));
40418+ atomic_read_unchecked(&fscache_n_cookie_index),
40419+ atomic_read_unchecked(&fscache_n_cookie_data),
40420+ atomic_read_unchecked(&fscache_n_cookie_special));
40421
40422 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
40423- atomic_read(&fscache_n_object_alloc),
40424- atomic_read(&fscache_n_object_no_alloc),
40425- atomic_read(&fscache_n_object_avail),
40426- atomic_read(&fscache_n_object_dead));
40427+ atomic_read_unchecked(&fscache_n_object_alloc),
40428+ atomic_read_unchecked(&fscache_n_object_no_alloc),
40429+ atomic_read_unchecked(&fscache_n_object_avail),
40430+ atomic_read_unchecked(&fscache_n_object_dead));
40431 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
40432- atomic_read(&fscache_n_checkaux_none),
40433- atomic_read(&fscache_n_checkaux_okay),
40434- atomic_read(&fscache_n_checkaux_update),
40435- atomic_read(&fscache_n_checkaux_obsolete));
40436+ atomic_read_unchecked(&fscache_n_checkaux_none),
40437+ atomic_read_unchecked(&fscache_n_checkaux_okay),
40438+ atomic_read_unchecked(&fscache_n_checkaux_update),
40439+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
40440
40441 seq_printf(m, "Pages : mrk=%u unc=%u\n",
40442- atomic_read(&fscache_n_marks),
40443- atomic_read(&fscache_n_uncaches));
40444+ atomic_read_unchecked(&fscache_n_marks),
40445+ atomic_read_unchecked(&fscache_n_uncaches));
40446
40447 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
40448 " oom=%u\n",
40449- atomic_read(&fscache_n_acquires),
40450- atomic_read(&fscache_n_acquires_null),
40451- atomic_read(&fscache_n_acquires_no_cache),
40452- atomic_read(&fscache_n_acquires_ok),
40453- atomic_read(&fscache_n_acquires_nobufs),
40454- atomic_read(&fscache_n_acquires_oom));
40455+ atomic_read_unchecked(&fscache_n_acquires),
40456+ atomic_read_unchecked(&fscache_n_acquires_null),
40457+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
40458+ atomic_read_unchecked(&fscache_n_acquires_ok),
40459+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
40460+ atomic_read_unchecked(&fscache_n_acquires_oom));
40461
40462 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
40463- atomic_read(&fscache_n_object_lookups),
40464- atomic_read(&fscache_n_object_lookups_negative),
40465- atomic_read(&fscache_n_object_lookups_positive),
40466- atomic_read(&fscache_n_object_created),
40467- atomic_read(&fscache_n_object_lookups_timed_out));
40468+ atomic_read_unchecked(&fscache_n_object_lookups),
40469+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
40470+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
40471+ atomic_read_unchecked(&fscache_n_object_created),
40472+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
40473
40474 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
40475- atomic_read(&fscache_n_updates),
40476- atomic_read(&fscache_n_updates_null),
40477- atomic_read(&fscache_n_updates_run));
40478+ atomic_read_unchecked(&fscache_n_updates),
40479+ atomic_read_unchecked(&fscache_n_updates_null),
40480+ atomic_read_unchecked(&fscache_n_updates_run));
40481
40482 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
40483- atomic_read(&fscache_n_relinquishes),
40484- atomic_read(&fscache_n_relinquishes_null),
40485- atomic_read(&fscache_n_relinquishes_waitcrt),
40486- atomic_read(&fscache_n_relinquishes_retire));
40487+ atomic_read_unchecked(&fscache_n_relinquishes),
40488+ atomic_read_unchecked(&fscache_n_relinquishes_null),
40489+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
40490+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
40491
40492 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
40493- atomic_read(&fscache_n_attr_changed),
40494- atomic_read(&fscache_n_attr_changed_ok),
40495- atomic_read(&fscache_n_attr_changed_nobufs),
40496- atomic_read(&fscache_n_attr_changed_nomem),
40497- atomic_read(&fscache_n_attr_changed_calls));
40498+ atomic_read_unchecked(&fscache_n_attr_changed),
40499+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
40500+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
40501+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
40502+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
40503
40504 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
40505- atomic_read(&fscache_n_allocs),
40506- atomic_read(&fscache_n_allocs_ok),
40507- atomic_read(&fscache_n_allocs_wait),
40508- atomic_read(&fscache_n_allocs_nobufs),
40509- atomic_read(&fscache_n_allocs_intr));
40510+ atomic_read_unchecked(&fscache_n_allocs),
40511+ atomic_read_unchecked(&fscache_n_allocs_ok),
40512+ atomic_read_unchecked(&fscache_n_allocs_wait),
40513+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
40514+ atomic_read_unchecked(&fscache_n_allocs_intr));
40515 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
40516- atomic_read(&fscache_n_alloc_ops),
40517- atomic_read(&fscache_n_alloc_op_waits),
40518- atomic_read(&fscache_n_allocs_object_dead));
40519+ atomic_read_unchecked(&fscache_n_alloc_ops),
40520+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
40521+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
40522
40523 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
40524 " int=%u oom=%u\n",
40525- atomic_read(&fscache_n_retrievals),
40526- atomic_read(&fscache_n_retrievals_ok),
40527- atomic_read(&fscache_n_retrievals_wait),
40528- atomic_read(&fscache_n_retrievals_nodata),
40529- atomic_read(&fscache_n_retrievals_nobufs),
40530- atomic_read(&fscache_n_retrievals_intr),
40531- atomic_read(&fscache_n_retrievals_nomem));
40532+ atomic_read_unchecked(&fscache_n_retrievals),
40533+ atomic_read_unchecked(&fscache_n_retrievals_ok),
40534+ atomic_read_unchecked(&fscache_n_retrievals_wait),
40535+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
40536+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
40537+ atomic_read_unchecked(&fscache_n_retrievals_intr),
40538+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
40539 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
40540- atomic_read(&fscache_n_retrieval_ops),
40541- atomic_read(&fscache_n_retrieval_op_waits),
40542- atomic_read(&fscache_n_retrievals_object_dead));
40543+ atomic_read_unchecked(&fscache_n_retrieval_ops),
40544+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
40545+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
40546
40547 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
40548- atomic_read(&fscache_n_stores),
40549- atomic_read(&fscache_n_stores_ok),
40550- atomic_read(&fscache_n_stores_again),
40551- atomic_read(&fscache_n_stores_nobufs),
40552- atomic_read(&fscache_n_stores_oom));
40553+ atomic_read_unchecked(&fscache_n_stores),
40554+ atomic_read_unchecked(&fscache_n_stores_ok),
40555+ atomic_read_unchecked(&fscache_n_stores_again),
40556+ atomic_read_unchecked(&fscache_n_stores_nobufs),
40557+ atomic_read_unchecked(&fscache_n_stores_oom));
40558 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
40559- atomic_read(&fscache_n_store_ops),
40560- atomic_read(&fscache_n_store_calls),
40561- atomic_read(&fscache_n_store_pages),
40562- atomic_read(&fscache_n_store_radix_deletes),
40563- atomic_read(&fscache_n_store_pages_over_limit));
40564+ atomic_read_unchecked(&fscache_n_store_ops),
40565+ atomic_read_unchecked(&fscache_n_store_calls),
40566+ atomic_read_unchecked(&fscache_n_store_pages),
40567+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
40568+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
40569
40570 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
40571- atomic_read(&fscache_n_store_vmscan_not_storing),
40572- atomic_read(&fscache_n_store_vmscan_gone),
40573- atomic_read(&fscache_n_store_vmscan_busy),
40574- atomic_read(&fscache_n_store_vmscan_cancelled));
40575+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
40576+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
40577+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
40578+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
40579
40580 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
40581- atomic_read(&fscache_n_op_pend),
40582- atomic_read(&fscache_n_op_run),
40583- atomic_read(&fscache_n_op_enqueue),
40584- atomic_read(&fscache_n_op_cancelled),
40585- atomic_read(&fscache_n_op_rejected));
40586+ atomic_read_unchecked(&fscache_n_op_pend),
40587+ atomic_read_unchecked(&fscache_n_op_run),
40588+ atomic_read_unchecked(&fscache_n_op_enqueue),
40589+ atomic_read_unchecked(&fscache_n_op_cancelled),
40590+ atomic_read_unchecked(&fscache_n_op_rejected));
40591 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
40592- atomic_read(&fscache_n_op_deferred_release),
40593- atomic_read(&fscache_n_op_release),
40594- atomic_read(&fscache_n_op_gc));
40595+ atomic_read_unchecked(&fscache_n_op_deferred_release),
40596+ atomic_read_unchecked(&fscache_n_op_release),
40597+ atomic_read_unchecked(&fscache_n_op_gc));
40598
40599 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
40600 atomic_read(&fscache_n_cop_alloc_object),
40601diff -urNp linux-3.0.4/fs/fs_struct.c linux-3.0.4/fs/fs_struct.c
40602--- linux-3.0.4/fs/fs_struct.c 2011-07-21 22:17:23.000000000 -0400
40603+++ linux-3.0.4/fs/fs_struct.c 2011-08-23 21:48:14.000000000 -0400
40604@@ -4,6 +4,7 @@
40605 #include <linux/path.h>
40606 #include <linux/slab.h>
40607 #include <linux/fs_struct.h>
40608+#include <linux/grsecurity.h>
40609 #include "internal.h"
40610
40611 static inline void path_get_longterm(struct path *path)
40612@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, s
40613 old_root = fs->root;
40614 fs->root = *path;
40615 path_get_longterm(path);
40616+ gr_set_chroot_entries(current, path);
40617 write_seqcount_end(&fs->seq);
40618 spin_unlock(&fs->lock);
40619 if (old_root.dentry)
40620@@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_roo
40621 && fs->root.mnt == old_root->mnt) {
40622 path_get_longterm(new_root);
40623 fs->root = *new_root;
40624+ gr_set_chroot_entries(p, new_root);
40625 count++;
40626 }
40627 if (fs->pwd.dentry == old_root->dentry
40628@@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
40629 spin_lock(&fs->lock);
40630 write_seqcount_begin(&fs->seq);
40631 tsk->fs = NULL;
40632- kill = !--fs->users;
40633+ gr_clear_chroot_entries(tsk);
40634+ kill = !atomic_dec_return(&fs->users);
40635 write_seqcount_end(&fs->seq);
40636 spin_unlock(&fs->lock);
40637 task_unlock(tsk);
40638@@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct
40639 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
40640 /* We don't need to lock fs - think why ;-) */
40641 if (fs) {
40642- fs->users = 1;
40643+ atomic_set(&fs->users, 1);
40644 fs->in_exec = 0;
40645 spin_lock_init(&fs->lock);
40646 seqcount_init(&fs->seq);
40647@@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct
40648 spin_lock(&old->lock);
40649 fs->root = old->root;
40650 path_get_longterm(&fs->root);
40651+ /* instead of calling gr_set_chroot_entries here,
40652+ we call it from every caller of this function
40653+ */
40654 fs->pwd = old->pwd;
40655 path_get_longterm(&fs->pwd);
40656 spin_unlock(&old->lock);
40657@@ -150,8 +157,9 @@ int unshare_fs_struct(void)
40658
40659 task_lock(current);
40660 spin_lock(&fs->lock);
40661- kill = !--fs->users;
40662+ kill = !atomic_dec_return(&fs->users);
40663 current->fs = new_fs;
40664+ gr_set_chroot_entries(current, &new_fs->root);
40665 spin_unlock(&fs->lock);
40666 task_unlock(current);
40667
40668@@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
40669
40670 /* to be mentioned only in INIT_TASK */
40671 struct fs_struct init_fs = {
40672- .users = 1,
40673+ .users = ATOMIC_INIT(1),
40674 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
40675 .seq = SEQCNT_ZERO,
40676 .umask = 0022,
40677@@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
40678 task_lock(current);
40679
40680 spin_lock(&init_fs.lock);
40681- init_fs.users++;
40682+ atomic_inc(&init_fs.users);
40683 spin_unlock(&init_fs.lock);
40684
40685 spin_lock(&fs->lock);
40686 current->fs = &init_fs;
40687- kill = !--fs->users;
40688+ gr_set_chroot_entries(current, &current->fs->root);
40689+ kill = !atomic_dec_return(&fs->users);
40690 spin_unlock(&fs->lock);
40691
40692 task_unlock(current);
40693diff -urNp linux-3.0.4/fs/fuse/cuse.c linux-3.0.4/fs/fuse/cuse.c
40694--- linux-3.0.4/fs/fuse/cuse.c 2011-07-21 22:17:23.000000000 -0400
40695+++ linux-3.0.4/fs/fuse/cuse.c 2011-08-23 21:47:56.000000000 -0400
40696@@ -586,10 +586,12 @@ static int __init cuse_init(void)
40697 INIT_LIST_HEAD(&cuse_conntbl[i]);
40698
40699 /* inherit and extend fuse_dev_operations */
40700- cuse_channel_fops = fuse_dev_operations;
40701- cuse_channel_fops.owner = THIS_MODULE;
40702- cuse_channel_fops.open = cuse_channel_open;
40703- cuse_channel_fops.release = cuse_channel_release;
40704+ pax_open_kernel();
40705+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
40706+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
40707+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
40708+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
40709+ pax_close_kernel();
40710
40711 cuse_class = class_create(THIS_MODULE, "cuse");
40712 if (IS_ERR(cuse_class))
40713diff -urNp linux-3.0.4/fs/fuse/dev.c linux-3.0.4/fs/fuse/dev.c
40714--- linux-3.0.4/fs/fuse/dev.c 2011-09-02 18:11:26.000000000 -0400
40715+++ linux-3.0.4/fs/fuse/dev.c 2011-08-29 23:26:27.000000000 -0400
40716@@ -1238,7 +1238,7 @@ static ssize_t fuse_dev_splice_read(stru
40717 ret = 0;
40718 pipe_lock(pipe);
40719
40720- if (!pipe->readers) {
40721+ if (!atomic_read(&pipe->readers)) {
40722 send_sig(SIGPIPE, current, 0);
40723 if (!ret)
40724 ret = -EPIPE;
40725diff -urNp linux-3.0.4/fs/fuse/dir.c linux-3.0.4/fs/fuse/dir.c
40726--- linux-3.0.4/fs/fuse/dir.c 2011-07-21 22:17:23.000000000 -0400
40727+++ linux-3.0.4/fs/fuse/dir.c 2011-08-23 21:47:56.000000000 -0400
40728@@ -1148,7 +1148,7 @@ static char *read_link(struct dentry *de
40729 return link;
40730 }
40731
40732-static void free_link(char *link)
40733+static void free_link(const char *link)
40734 {
40735 if (!IS_ERR(link))
40736 free_page((unsigned long) link);
40737diff -urNp linux-3.0.4/fs/gfs2/inode.c linux-3.0.4/fs/gfs2/inode.c
40738--- linux-3.0.4/fs/gfs2/inode.c 2011-07-21 22:17:23.000000000 -0400
40739+++ linux-3.0.4/fs/gfs2/inode.c 2011-08-23 21:47:56.000000000 -0400
40740@@ -1525,7 +1525,7 @@ out:
40741
40742 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
40743 {
40744- char *s = nd_get_link(nd);
40745+ const char *s = nd_get_link(nd);
40746 if (!IS_ERR(s))
40747 kfree(s);
40748 }
40749diff -urNp linux-3.0.4/fs/hfsplus/catalog.c linux-3.0.4/fs/hfsplus/catalog.c
40750--- linux-3.0.4/fs/hfsplus/catalog.c 2011-07-21 22:17:23.000000000 -0400
40751+++ linux-3.0.4/fs/hfsplus/catalog.c 2011-08-23 21:48:14.000000000 -0400
40752@@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block
40753 int err;
40754 u16 type;
40755
40756+ pax_track_stack();
40757+
40758 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
40759 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
40760 if (err)
40761@@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct
40762 int entry_size;
40763 int err;
40764
40765+ pax_track_stack();
40766+
40767 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
40768 str->name, cnid, inode->i_nlink);
40769 hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
40770@@ -349,6 +353,8 @@ int hfsplus_rename_cat(u32 cnid,
40771 int entry_size, type;
40772 int err = 0;
40773
40774+ pax_track_stack();
40775+
40776 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
40777 cnid, src_dir->i_ino, src_name->name,
40778 dst_dir->i_ino, dst_name->name);
40779diff -urNp linux-3.0.4/fs/hfsplus/dir.c linux-3.0.4/fs/hfsplus/dir.c
40780--- linux-3.0.4/fs/hfsplus/dir.c 2011-07-21 22:17:23.000000000 -0400
40781+++ linux-3.0.4/fs/hfsplus/dir.c 2011-08-23 21:48:14.000000000 -0400
40782@@ -129,6 +129,8 @@ static int hfsplus_readdir(struct file *
40783 struct hfsplus_readdir_data *rd;
40784 u16 type;
40785
40786+ pax_track_stack();
40787+
40788 if (filp->f_pos >= inode->i_size)
40789 return 0;
40790
40791diff -urNp linux-3.0.4/fs/hfsplus/inode.c linux-3.0.4/fs/hfsplus/inode.c
40792--- linux-3.0.4/fs/hfsplus/inode.c 2011-07-21 22:17:23.000000000 -0400
40793+++ linux-3.0.4/fs/hfsplus/inode.c 2011-08-23 21:48:14.000000000 -0400
40794@@ -489,6 +489,8 @@ int hfsplus_cat_read_inode(struct inode
40795 int res = 0;
40796 u16 type;
40797
40798+ pax_track_stack();
40799+
40800 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
40801
40802 HFSPLUS_I(inode)->linkid = 0;
40803@@ -552,6 +554,8 @@ int hfsplus_cat_write_inode(struct inode
40804 struct hfs_find_data fd;
40805 hfsplus_cat_entry entry;
40806
40807+ pax_track_stack();
40808+
40809 if (HFSPLUS_IS_RSRC(inode))
40810 main_inode = HFSPLUS_I(inode)->rsrc_inode;
40811
40812diff -urNp linux-3.0.4/fs/hfsplus/ioctl.c linux-3.0.4/fs/hfsplus/ioctl.c
40813--- linux-3.0.4/fs/hfsplus/ioctl.c 2011-07-21 22:17:23.000000000 -0400
40814+++ linux-3.0.4/fs/hfsplus/ioctl.c 2011-08-23 21:48:14.000000000 -0400
40815@@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dent
40816 struct hfsplus_cat_file *file;
40817 int res;
40818
40819+ pax_track_stack();
40820+
40821 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
40822 return -EOPNOTSUPP;
40823
40824@@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *
40825 struct hfsplus_cat_file *file;
40826 ssize_t res = 0;
40827
40828+ pax_track_stack();
40829+
40830 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
40831 return -EOPNOTSUPP;
40832
40833diff -urNp linux-3.0.4/fs/hfsplus/super.c linux-3.0.4/fs/hfsplus/super.c
40834--- linux-3.0.4/fs/hfsplus/super.c 2011-07-21 22:17:23.000000000 -0400
40835+++ linux-3.0.4/fs/hfsplus/super.c 2011-08-23 21:48:14.000000000 -0400
40836@@ -340,6 +340,8 @@ static int hfsplus_fill_super(struct sup
40837 struct nls_table *nls = NULL;
40838 int err;
40839
40840+ pax_track_stack();
40841+
40842 err = -EINVAL;
40843 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
40844 if (!sbi)
40845diff -urNp linux-3.0.4/fs/hugetlbfs/inode.c linux-3.0.4/fs/hugetlbfs/inode.c
40846--- linux-3.0.4/fs/hugetlbfs/inode.c 2011-07-21 22:17:23.000000000 -0400
40847+++ linux-3.0.4/fs/hugetlbfs/inode.c 2011-08-23 21:48:14.000000000 -0400
40848@@ -914,7 +914,7 @@ static struct file_system_type hugetlbfs
40849 .kill_sb = kill_litter_super,
40850 };
40851
40852-static struct vfsmount *hugetlbfs_vfsmount;
40853+struct vfsmount *hugetlbfs_vfsmount;
40854
40855 static int can_do_hugetlb_shm(void)
40856 {
40857diff -urNp linux-3.0.4/fs/inode.c linux-3.0.4/fs/inode.c
40858--- linux-3.0.4/fs/inode.c 2011-07-21 22:17:23.000000000 -0400
40859+++ linux-3.0.4/fs/inode.c 2011-08-23 21:47:56.000000000 -0400
40860@@ -829,8 +829,8 @@ unsigned int get_next_ino(void)
40861
40862 #ifdef CONFIG_SMP
40863 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
40864- static atomic_t shared_last_ino;
40865- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
40866+ static atomic_unchecked_t shared_last_ino;
40867+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
40868
40869 res = next - LAST_INO_BATCH;
40870 }
40871diff -urNp linux-3.0.4/fs/jbd/checkpoint.c linux-3.0.4/fs/jbd/checkpoint.c
40872--- linux-3.0.4/fs/jbd/checkpoint.c 2011-07-21 22:17:23.000000000 -0400
40873+++ linux-3.0.4/fs/jbd/checkpoint.c 2011-08-23 21:48:14.000000000 -0400
40874@@ -350,6 +350,8 @@ int log_do_checkpoint(journal_t *journal
40875 tid_t this_tid;
40876 int result;
40877
40878+ pax_track_stack();
40879+
40880 jbd_debug(1, "Start checkpoint\n");
40881
40882 /*
40883diff -urNp linux-3.0.4/fs/jffs2/compr_rtime.c linux-3.0.4/fs/jffs2/compr_rtime.c
40884--- linux-3.0.4/fs/jffs2/compr_rtime.c 2011-07-21 22:17:23.000000000 -0400
40885+++ linux-3.0.4/fs/jffs2/compr_rtime.c 2011-08-23 21:48:14.000000000 -0400
40886@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
40887 int outpos = 0;
40888 int pos=0;
40889
40890+ pax_track_stack();
40891+
40892 memset(positions,0,sizeof(positions));
40893
40894 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
40895@@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsign
40896 int outpos = 0;
40897 int pos=0;
40898
40899+ pax_track_stack();
40900+
40901 memset(positions,0,sizeof(positions));
40902
40903 while (outpos<destlen) {
40904diff -urNp linux-3.0.4/fs/jffs2/compr_rubin.c linux-3.0.4/fs/jffs2/compr_rubin.c
40905--- linux-3.0.4/fs/jffs2/compr_rubin.c 2011-07-21 22:17:23.000000000 -0400
40906+++ linux-3.0.4/fs/jffs2/compr_rubin.c 2011-08-23 21:48:14.000000000 -0400
40907@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
40908 int ret;
40909 uint32_t mysrclen, mydstlen;
40910
40911+ pax_track_stack();
40912+
40913 mysrclen = *sourcelen;
40914 mydstlen = *dstlen - 8;
40915
40916diff -urNp linux-3.0.4/fs/jffs2/erase.c linux-3.0.4/fs/jffs2/erase.c
40917--- linux-3.0.4/fs/jffs2/erase.c 2011-07-21 22:17:23.000000000 -0400
40918+++ linux-3.0.4/fs/jffs2/erase.c 2011-08-23 21:47:56.000000000 -0400
40919@@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(stru
40920 struct jffs2_unknown_node marker = {
40921 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
40922 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
40923- .totlen = cpu_to_je32(c->cleanmarker_size)
40924+ .totlen = cpu_to_je32(c->cleanmarker_size),
40925+ .hdr_crc = cpu_to_je32(0)
40926 };
40927
40928 jffs2_prealloc_raw_node_refs(c, jeb, 1);
40929diff -urNp linux-3.0.4/fs/jffs2/wbuf.c linux-3.0.4/fs/jffs2/wbuf.c
40930--- linux-3.0.4/fs/jffs2/wbuf.c 2011-07-21 22:17:23.000000000 -0400
40931+++ linux-3.0.4/fs/jffs2/wbuf.c 2011-08-23 21:47:56.000000000 -0400
40932@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
40933 {
40934 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
40935 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
40936- .totlen = constant_cpu_to_je32(8)
40937+ .totlen = constant_cpu_to_je32(8),
40938+ .hdr_crc = constant_cpu_to_je32(0)
40939 };
40940
40941 /*
40942diff -urNp linux-3.0.4/fs/jffs2/xattr.c linux-3.0.4/fs/jffs2/xattr.c
40943--- linux-3.0.4/fs/jffs2/xattr.c 2011-07-21 22:17:23.000000000 -0400
40944+++ linux-3.0.4/fs/jffs2/xattr.c 2011-08-23 21:48:14.000000000 -0400
40945@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
40946
40947 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
40948
40949+ pax_track_stack();
40950+
40951 /* Phase.1 : Merge same xref */
40952 for (i=0; i < XREF_TMPHASH_SIZE; i++)
40953 xref_tmphash[i] = NULL;
40954diff -urNp linux-3.0.4/fs/jfs/super.c linux-3.0.4/fs/jfs/super.c
40955--- linux-3.0.4/fs/jfs/super.c 2011-07-21 22:17:23.000000000 -0400
40956+++ linux-3.0.4/fs/jfs/super.c 2011-08-23 21:47:56.000000000 -0400
40957@@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
40958
40959 jfs_inode_cachep =
40960 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
40961- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
40962+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
40963 init_once);
40964 if (jfs_inode_cachep == NULL)
40965 return -ENOMEM;
40966diff -urNp linux-3.0.4/fs/Kconfig.binfmt linux-3.0.4/fs/Kconfig.binfmt
40967--- linux-3.0.4/fs/Kconfig.binfmt 2011-07-21 22:17:23.000000000 -0400
40968+++ linux-3.0.4/fs/Kconfig.binfmt 2011-08-23 21:47:56.000000000 -0400
40969@@ -86,7 +86,7 @@ config HAVE_AOUT
40970
40971 config BINFMT_AOUT
40972 tristate "Kernel support for a.out and ECOFF binaries"
40973- depends on HAVE_AOUT
40974+ depends on HAVE_AOUT && BROKEN
40975 ---help---
40976 A.out (Assembler.OUTput) is a set of formats for libraries and
40977 executables used in the earliest versions of UNIX. Linux used
40978diff -urNp linux-3.0.4/fs/libfs.c linux-3.0.4/fs/libfs.c
40979--- linux-3.0.4/fs/libfs.c 2011-07-21 22:17:23.000000000 -0400
40980+++ linux-3.0.4/fs/libfs.c 2011-08-23 21:47:56.000000000 -0400
40981@@ -163,6 +163,9 @@ int dcache_readdir(struct file * filp, v
40982
40983 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
40984 struct dentry *next;
40985+ char d_name[sizeof(next->d_iname)];
40986+ const unsigned char *name;
40987+
40988 next = list_entry(p, struct dentry, d_u.d_child);
40989 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
40990 if (!simple_positive(next)) {
40991@@ -172,7 +175,12 @@ int dcache_readdir(struct file * filp, v
40992
40993 spin_unlock(&next->d_lock);
40994 spin_unlock(&dentry->d_lock);
40995- if (filldir(dirent, next->d_name.name,
40996+ name = next->d_name.name;
40997+ if (name == next->d_iname) {
40998+ memcpy(d_name, name, next->d_name.len);
40999+ name = d_name;
41000+ }
41001+ if (filldir(dirent, name,
41002 next->d_name.len, filp->f_pos,
41003 next->d_inode->i_ino,
41004 dt_type(next->d_inode)) < 0)
41005diff -urNp linux-3.0.4/fs/lockd/clntproc.c linux-3.0.4/fs/lockd/clntproc.c
41006--- linux-3.0.4/fs/lockd/clntproc.c 2011-07-21 22:17:23.000000000 -0400
41007+++ linux-3.0.4/fs/lockd/clntproc.c 2011-08-23 21:48:14.000000000 -0400
41008@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
41009 /*
41010 * Cookie counter for NLM requests
41011 */
41012-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
41013+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
41014
41015 void nlmclnt_next_cookie(struct nlm_cookie *c)
41016 {
41017- u32 cookie = atomic_inc_return(&nlm_cookie);
41018+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
41019
41020 memcpy(c->data, &cookie, 4);
41021 c->len=4;
41022@@ -620,6 +620,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
41023 struct nlm_rqst reqst, *req;
41024 int status;
41025
41026+ pax_track_stack();
41027+
41028 req = &reqst;
41029 memset(req, 0, sizeof(*req));
41030 locks_init_lock(&req->a_args.lock.fl);
41031diff -urNp linux-3.0.4/fs/locks.c linux-3.0.4/fs/locks.c
41032--- linux-3.0.4/fs/locks.c 2011-07-21 22:17:23.000000000 -0400
41033+++ linux-3.0.4/fs/locks.c 2011-08-23 21:47:56.000000000 -0400
41034@@ -2043,16 +2043,16 @@ void locks_remove_flock(struct file *fil
41035 return;
41036
41037 if (filp->f_op && filp->f_op->flock) {
41038- struct file_lock fl = {
41039+ struct file_lock flock = {
41040 .fl_pid = current->tgid,
41041 .fl_file = filp,
41042 .fl_flags = FL_FLOCK,
41043 .fl_type = F_UNLCK,
41044 .fl_end = OFFSET_MAX,
41045 };
41046- filp->f_op->flock(filp, F_SETLKW, &fl);
41047- if (fl.fl_ops && fl.fl_ops->fl_release_private)
41048- fl.fl_ops->fl_release_private(&fl);
41049+ filp->f_op->flock(filp, F_SETLKW, &flock);
41050+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
41051+ flock.fl_ops->fl_release_private(&flock);
41052 }
41053
41054 lock_flocks();
41055diff -urNp linux-3.0.4/fs/logfs/super.c linux-3.0.4/fs/logfs/super.c
41056--- linux-3.0.4/fs/logfs/super.c 2011-07-21 22:17:23.000000000 -0400
41057+++ linux-3.0.4/fs/logfs/super.c 2011-08-23 21:48:14.000000000 -0400
41058@@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super
41059 struct logfs_disk_super _ds1, *ds1 = &_ds1;
41060 int err, valid0, valid1;
41061
41062+ pax_track_stack();
41063+
41064 /* read first superblock */
41065 err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
41066 if (err)
41067diff -urNp linux-3.0.4/fs/namei.c linux-3.0.4/fs/namei.c
41068--- linux-3.0.4/fs/namei.c 2011-07-21 22:17:23.000000000 -0400
41069+++ linux-3.0.4/fs/namei.c 2011-08-23 21:48:14.000000000 -0400
41070@@ -237,21 +237,31 @@ int generic_permission(struct inode *ino
41071 return ret;
41072
41073 /*
41074- * Read/write DACs are always overridable.
41075- * Executable DACs are overridable for all directories and
41076- * for non-directories that have least one exec bit set.
41077+ * Searching includes executable on directories, else just read.
41078 */
41079- if (!(mask & MAY_EXEC) || execute_ok(inode))
41080- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
41081+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
41082+ if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) {
41083+#ifdef CONFIG_GRKERNSEC
41084+ if (flags & IPERM_FLAG_RCU)
41085+ return -ECHILD;
41086+#endif
41087+ if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
41088 return 0;
41089+ }
41090
41091 /*
41092- * Searching includes executable on directories, else just read.
41093+ * Read/write DACs are always overridable.
41094+ * Executable DACs are overridable for all directories and
41095+ * for non-directories that have least one exec bit set.
41096 */
41097- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
41098- if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
41099- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
41100+ if (!(mask & MAY_EXEC) || execute_ok(inode)) {
41101+#ifdef CONFIG_GRKERNSEC
41102+ if (flags & IPERM_FLAG_RCU)
41103+ return -ECHILD;
41104+#endif
41105+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
41106 return 0;
41107+ }
41108
41109 return -EACCES;
41110 }
41111@@ -547,6 +557,9 @@ static int complete_walk(struct nameidat
41112 br_read_unlock(vfsmount_lock);
41113 }
41114
41115+ if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
41116+ return -ENOENT;
41117+
41118 if (likely(!(nd->flags & LOOKUP_JUMPED)))
41119 return 0;
41120
41121@@ -593,9 +606,16 @@ static inline int exec_permission(struct
41122 if (ret == -ECHILD)
41123 return ret;
41124
41125- if (ns_capable(ns, CAP_DAC_OVERRIDE) ||
41126- ns_capable(ns, CAP_DAC_READ_SEARCH))
41127+ if (ns_capable_nolog(ns, CAP_DAC_OVERRIDE))
41128 goto ok;
41129+ else {
41130+#ifdef CONFIG_GRKERNSEC
41131+ if (flags & IPERM_FLAG_RCU)
41132+ return -ECHILD;
41133+#endif
41134+ if (ns_capable(ns, CAP_DAC_READ_SEARCH) || ns_capable(ns, CAP_DAC_OVERRIDE))
41135+ goto ok;
41136+ }
41137
41138 return ret;
41139 ok:
41140@@ -703,11 +723,19 @@ follow_link(struct path *link, struct na
41141 return error;
41142 }
41143
41144+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
41145+ dentry->d_inode, dentry, nd->path.mnt)) {
41146+ error = -EACCES;
41147+ *p = ERR_PTR(error); /* no ->put_link(), please */
41148+ path_put(&nd->path);
41149+ return error;
41150+ }
41151+
41152 nd->last_type = LAST_BIND;
41153 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
41154 error = PTR_ERR(*p);
41155 if (!IS_ERR(*p)) {
41156- char *s = nd_get_link(nd);
41157+ const char *s = nd_get_link(nd);
41158 error = 0;
41159 if (s)
41160 error = __vfs_follow_link(nd, s);
41161@@ -1625,6 +1653,9 @@ static int do_path_lookup(int dfd, const
41162 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
41163
41164 if (likely(!retval)) {
41165+ if (*name != '/' && nd->path.dentry && nd->inode && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
41166+ return -ENOENT;
41167+
41168 if (unlikely(!audit_dummy_context())) {
41169 if (nd->path.dentry && nd->inode)
41170 audit_inode(name, nd->path.dentry);
41171@@ -1935,6 +1966,30 @@ int vfs_create(struct inode *dir, struct
41172 return error;
41173 }
41174
41175+/*
41176+ * Note that while the flag value (low two bits) for sys_open means:
41177+ * 00 - read-only
41178+ * 01 - write-only
41179+ * 10 - read-write
41180+ * 11 - special
41181+ * it is changed into
41182+ * 00 - no permissions needed
41183+ * 01 - read-permission
41184+ * 10 - write-permission
41185+ * 11 - read-write
41186+ * for the internal routines (ie open_namei()/follow_link() etc)
41187+ * This is more logical, and also allows the 00 "no perm needed"
41188+ * to be used for symlinks (where the permissions are checked
41189+ * later).
41190+ *
41191+*/
41192+static inline int open_to_namei_flags(int flag)
41193+{
41194+ if ((flag+1) & O_ACCMODE)
41195+ flag++;
41196+ return flag;
41197+}
41198+
41199 static int may_open(struct path *path, int acc_mode, int flag)
41200 {
41201 struct dentry *dentry = path->dentry;
41202@@ -1987,7 +2042,27 @@ static int may_open(struct path *path, i
41203 /*
41204 * Ensure there are no outstanding leases on the file.
41205 */
41206- return break_lease(inode, flag);
41207+ error = break_lease(inode, flag);
41208+
41209+ if (error)
41210+ return error;
41211+
41212+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
41213+ error = -EPERM;
41214+ goto exit;
41215+ }
41216+
41217+ if (gr_handle_rawio(inode)) {
41218+ error = -EPERM;
41219+ goto exit;
41220+ }
41221+
41222+ if (!gr_acl_handle_open(dentry, path->mnt, open_to_namei_flags(flag))) {
41223+ error = -EACCES;
41224+ goto exit;
41225+ }
41226+exit:
41227+ return error;
41228 }
41229
41230 static int handle_truncate(struct file *filp)
41231@@ -2013,30 +2088,6 @@ static int handle_truncate(struct file *
41232 }
41233
41234 /*
41235- * Note that while the flag value (low two bits) for sys_open means:
41236- * 00 - read-only
41237- * 01 - write-only
41238- * 10 - read-write
41239- * 11 - special
41240- * it is changed into
41241- * 00 - no permissions needed
41242- * 01 - read-permission
41243- * 10 - write-permission
41244- * 11 - read-write
41245- * for the internal routines (ie open_namei()/follow_link() etc)
41246- * This is more logical, and also allows the 00 "no perm needed"
41247- * to be used for symlinks (where the permissions are checked
41248- * later).
41249- *
41250-*/
41251-static inline int open_to_namei_flags(int flag)
41252-{
41253- if ((flag+1) & O_ACCMODE)
41254- flag++;
41255- return flag;
41256-}
41257-
41258-/*
41259 * Handle the last step of open()
41260 */
41261 static struct file *do_last(struct nameidata *nd, struct path *path,
41262@@ -2045,6 +2096,7 @@ static struct file *do_last(struct namei
41263 struct dentry *dir = nd->path.dentry;
41264 struct dentry *dentry;
41265 int open_flag = op->open_flag;
41266+ int flag = open_to_namei_flags(open_flag);
41267 int will_truncate = open_flag & O_TRUNC;
41268 int want_write = 0;
41269 int acc_mode = op->acc_mode;
41270@@ -2132,6 +2184,12 @@ static struct file *do_last(struct namei
41271 /* Negative dentry, just create the file */
41272 if (!dentry->d_inode) {
41273 int mode = op->mode;
41274+
41275+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, flag, mode)) {
41276+ error = -EACCES;
41277+ goto exit_mutex_unlock;
41278+ }
41279+
41280 if (!IS_POSIXACL(dir->d_inode))
41281 mode &= ~current_umask();
41282 /*
41283@@ -2155,6 +2213,8 @@ static struct file *do_last(struct namei
41284 error = vfs_create(dir->d_inode, dentry, mode, nd);
41285 if (error)
41286 goto exit_mutex_unlock;
41287+ else
41288+ gr_handle_create(path->dentry, path->mnt);
41289 mutex_unlock(&dir->d_inode->i_mutex);
41290 dput(nd->path.dentry);
41291 nd->path.dentry = dentry;
41292@@ -2164,6 +2224,14 @@ static struct file *do_last(struct namei
41293 /*
41294 * It already exists.
41295 */
41296+
41297+ /* only check if O_CREAT is specified, all other checks need to go
41298+ into may_open */
41299+ if (gr_handle_fifo(path->dentry, path->mnt, dir, flag, acc_mode)) {
41300+ error = -EACCES;
41301+ goto exit_mutex_unlock;
41302+ }
41303+
41304 mutex_unlock(&dir->d_inode->i_mutex);
41305 audit_inode(pathname, path->dentry);
41306
41307@@ -2450,6 +2518,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
41308 error = may_mknod(mode);
41309 if (error)
41310 goto out_dput;
41311+
41312+ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
41313+ error = -EPERM;
41314+ goto out_dput;
41315+ }
41316+
41317+ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
41318+ error = -EACCES;
41319+ goto out_dput;
41320+ }
41321+
41322 error = mnt_want_write(nd.path.mnt);
41323 if (error)
41324 goto out_dput;
41325@@ -2470,6 +2549,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
41326 }
41327 out_drop_write:
41328 mnt_drop_write(nd.path.mnt);
41329+
41330+ if (!error)
41331+ gr_handle_create(dentry, nd.path.mnt);
41332 out_dput:
41333 dput(dentry);
41334 out_unlock:
41335@@ -2522,6 +2604,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
41336 if (IS_ERR(dentry))
41337 goto out_unlock;
41338
41339+ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
41340+ error = -EACCES;
41341+ goto out_dput;
41342+ }
41343+
41344 if (!IS_POSIXACL(nd.path.dentry->d_inode))
41345 mode &= ~current_umask();
41346 error = mnt_want_write(nd.path.mnt);
41347@@ -2533,6 +2620,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
41348 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
41349 out_drop_write:
41350 mnt_drop_write(nd.path.mnt);
41351+
41352+ if (!error)
41353+ gr_handle_create(dentry, nd.path.mnt);
41354+
41355 out_dput:
41356 dput(dentry);
41357 out_unlock:
41358@@ -2613,6 +2704,8 @@ static long do_rmdir(int dfd, const char
41359 char * name;
41360 struct dentry *dentry;
41361 struct nameidata nd;
41362+ ino_t saved_ino = 0;
41363+ dev_t saved_dev = 0;
41364
41365 error = user_path_parent(dfd, pathname, &nd, &name);
41366 if (error)
41367@@ -2641,6 +2734,17 @@ static long do_rmdir(int dfd, const char
41368 error = -ENOENT;
41369 goto exit3;
41370 }
41371+
41372+ if (dentry->d_inode->i_nlink <= 1) {
41373+ saved_ino = dentry->d_inode->i_ino;
41374+ saved_dev = gr_get_dev_from_dentry(dentry);
41375+ }
41376+
41377+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
41378+ error = -EACCES;
41379+ goto exit3;
41380+ }
41381+
41382 error = mnt_want_write(nd.path.mnt);
41383 if (error)
41384 goto exit3;
41385@@ -2648,6 +2752,8 @@ static long do_rmdir(int dfd, const char
41386 if (error)
41387 goto exit4;
41388 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
41389+ if (!error && (saved_dev || saved_ino))
41390+ gr_handle_delete(saved_ino, saved_dev);
41391 exit4:
41392 mnt_drop_write(nd.path.mnt);
41393 exit3:
41394@@ -2710,6 +2816,8 @@ static long do_unlinkat(int dfd, const c
41395 struct dentry *dentry;
41396 struct nameidata nd;
41397 struct inode *inode = NULL;
41398+ ino_t saved_ino = 0;
41399+ dev_t saved_dev = 0;
41400
41401 error = user_path_parent(dfd, pathname, &nd, &name);
41402 if (error)
41403@@ -2732,6 +2840,16 @@ static long do_unlinkat(int dfd, const c
41404 if (!inode)
41405 goto slashes;
41406 ihold(inode);
41407+
41408+ if (inode->i_nlink <= 1) {
41409+ saved_ino = inode->i_ino;
41410+ saved_dev = gr_get_dev_from_dentry(dentry);
41411+ }
41412+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
41413+ error = -EACCES;
41414+ goto exit2;
41415+ }
41416+
41417 error = mnt_want_write(nd.path.mnt);
41418 if (error)
41419 goto exit2;
41420@@ -2739,6 +2857,8 @@ static long do_unlinkat(int dfd, const c
41421 if (error)
41422 goto exit3;
41423 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
41424+ if (!error && (saved_ino || saved_dev))
41425+ gr_handle_delete(saved_ino, saved_dev);
41426 exit3:
41427 mnt_drop_write(nd.path.mnt);
41428 exit2:
41429@@ -2816,6 +2936,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
41430 if (IS_ERR(dentry))
41431 goto out_unlock;
41432
41433+ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
41434+ error = -EACCES;
41435+ goto out_dput;
41436+ }
41437+
41438 error = mnt_want_write(nd.path.mnt);
41439 if (error)
41440 goto out_dput;
41441@@ -2823,6 +2948,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
41442 if (error)
41443 goto out_drop_write;
41444 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
41445+ if (!error)
41446+ gr_handle_create(dentry, nd.path.mnt);
41447 out_drop_write:
41448 mnt_drop_write(nd.path.mnt);
41449 out_dput:
41450@@ -2931,6 +3058,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
41451 error = PTR_ERR(new_dentry);
41452 if (IS_ERR(new_dentry))
41453 goto out_unlock;
41454+
41455+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
41456+ old_path.dentry->d_inode,
41457+ old_path.dentry->d_inode->i_mode, to)) {
41458+ error = -EACCES;
41459+ goto out_dput;
41460+ }
41461+
41462+ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
41463+ old_path.dentry, old_path.mnt, to)) {
41464+ error = -EACCES;
41465+ goto out_dput;
41466+ }
41467+
41468 error = mnt_want_write(nd.path.mnt);
41469 if (error)
41470 goto out_dput;
41471@@ -2938,6 +3079,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
41472 if (error)
41473 goto out_drop_write;
41474 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
41475+ if (!error)
41476+ gr_handle_create(new_dentry, nd.path.mnt);
41477 out_drop_write:
41478 mnt_drop_write(nd.path.mnt);
41479 out_dput:
41480@@ -3113,6 +3256,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
41481 char *to;
41482 int error;
41483
41484+ pax_track_stack();
41485+
41486 error = user_path_parent(olddfd, oldname, &oldnd, &from);
41487 if (error)
41488 goto exit;
41489@@ -3169,6 +3314,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
41490 if (new_dentry == trap)
41491 goto exit5;
41492
41493+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
41494+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
41495+ to);
41496+ if (error)
41497+ goto exit5;
41498+
41499 error = mnt_want_write(oldnd.path.mnt);
41500 if (error)
41501 goto exit5;
41502@@ -3178,6 +3329,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
41503 goto exit6;
41504 error = vfs_rename(old_dir->d_inode, old_dentry,
41505 new_dir->d_inode, new_dentry);
41506+ if (!error)
41507+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
41508+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
41509 exit6:
41510 mnt_drop_write(oldnd.path.mnt);
41511 exit5:
41512@@ -3203,6 +3357,8 @@ SYSCALL_DEFINE2(rename, const char __use
41513
41514 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
41515 {
41516+ char tmpbuf[64];
41517+ const char *newlink;
41518 int len;
41519
41520 len = PTR_ERR(link);
41521@@ -3212,7 +3368,14 @@ int vfs_readlink(struct dentry *dentry,
41522 len = strlen(link);
41523 if (len > (unsigned) buflen)
41524 len = buflen;
41525- if (copy_to_user(buffer, link, len))
41526+
41527+ if (len < sizeof(tmpbuf)) {
41528+ memcpy(tmpbuf, link, len);
41529+ newlink = tmpbuf;
41530+ } else
41531+ newlink = link;
41532+
41533+ if (copy_to_user(buffer, newlink, len))
41534 len = -EFAULT;
41535 out:
41536 return len;
41537diff -urNp linux-3.0.4/fs/namespace.c linux-3.0.4/fs/namespace.c
41538--- linux-3.0.4/fs/namespace.c 2011-07-21 22:17:23.000000000 -0400
41539+++ linux-3.0.4/fs/namespace.c 2011-08-23 21:48:14.000000000 -0400
41540@@ -1328,6 +1328,9 @@ static int do_umount(struct vfsmount *mn
41541 if (!(sb->s_flags & MS_RDONLY))
41542 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
41543 up_write(&sb->s_umount);
41544+
41545+ gr_log_remount(mnt->mnt_devname, retval);
41546+
41547 return retval;
41548 }
41549
41550@@ -1347,6 +1350,9 @@ static int do_umount(struct vfsmount *mn
41551 br_write_unlock(vfsmount_lock);
41552 up_write(&namespace_sem);
41553 release_mounts(&umount_list);
41554+
41555+ gr_log_unmount(mnt->mnt_devname, retval);
41556+
41557 return retval;
41558 }
41559
41560@@ -2338,6 +2344,16 @@ long do_mount(char *dev_name, char *dir_
41561 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
41562 MS_STRICTATIME);
41563
41564+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
41565+ retval = -EPERM;
41566+ goto dput_out;
41567+ }
41568+
41569+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
41570+ retval = -EPERM;
41571+ goto dput_out;
41572+ }
41573+
41574 if (flags & MS_REMOUNT)
41575 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
41576 data_page);
41577@@ -2352,6 +2368,9 @@ long do_mount(char *dev_name, char *dir_
41578 dev_name, data_page);
41579 dput_out:
41580 path_put(&path);
41581+
41582+ gr_log_mount(dev_name, dir_name, retval);
41583+
41584 return retval;
41585 }
41586
41587@@ -2575,6 +2594,11 @@ SYSCALL_DEFINE2(pivot_root, const char _
41588 if (error)
41589 goto out2;
41590
41591+ if (gr_handle_chroot_pivot()) {
41592+ error = -EPERM;
41593+ goto out2;
41594+ }
41595+
41596 get_fs_root(current->fs, &root);
41597 error = lock_mount(&old);
41598 if (error)
41599diff -urNp linux-3.0.4/fs/ncpfs/dir.c linux-3.0.4/fs/ncpfs/dir.c
41600--- linux-3.0.4/fs/ncpfs/dir.c 2011-07-21 22:17:23.000000000 -0400
41601+++ linux-3.0.4/fs/ncpfs/dir.c 2011-08-23 21:48:14.000000000 -0400
41602@@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentr
41603 int res, val = 0, len;
41604 __u8 __name[NCP_MAXPATHLEN + 1];
41605
41606+ pax_track_stack();
41607+
41608 if (dentry == dentry->d_sb->s_root)
41609 return 1;
41610
41611@@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct
41612 int error, res, len;
41613 __u8 __name[NCP_MAXPATHLEN + 1];
41614
41615+ pax_track_stack();
41616+
41617 error = -EIO;
41618 if (!ncp_conn_valid(server))
41619 goto finished;
41620@@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, st
41621 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
41622 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
41623
41624+ pax_track_stack();
41625+
41626 ncp_age_dentry(server, dentry);
41627 len = sizeof(__name);
41628 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
41629@@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir,
41630 int error, len;
41631 __u8 __name[NCP_MAXPATHLEN + 1];
41632
41633+ pax_track_stack();
41634+
41635 DPRINTK("ncp_mkdir: making %s/%s\n",
41636 dentry->d_parent->d_name.name, dentry->d_name.name);
41637
41638@@ -1140,6 +1148,8 @@ static int ncp_rename(struct inode *old_
41639 int old_len, new_len;
41640 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
41641
41642+ pax_track_stack();
41643+
41644 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
41645 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
41646 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
41647diff -urNp linux-3.0.4/fs/ncpfs/inode.c linux-3.0.4/fs/ncpfs/inode.c
41648--- linux-3.0.4/fs/ncpfs/inode.c 2011-07-21 22:17:23.000000000 -0400
41649+++ linux-3.0.4/fs/ncpfs/inode.c 2011-08-23 21:48:14.000000000 -0400
41650@@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_b
41651 #endif
41652 struct ncp_entry_info finfo;
41653
41654+ pax_track_stack();
41655+
41656 memset(&data, 0, sizeof(data));
41657 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
41658 if (!server)
41659diff -urNp linux-3.0.4/fs/nfs/inode.c linux-3.0.4/fs/nfs/inode.c
41660--- linux-3.0.4/fs/nfs/inode.c 2011-07-21 22:17:23.000000000 -0400
41661+++ linux-3.0.4/fs/nfs/inode.c 2011-08-23 21:47:56.000000000 -0400
41662@@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct
41663 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
41664 nfsi->attrtimeo_timestamp = jiffies;
41665
41666- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
41667+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
41668 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
41669 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
41670 else
41671@@ -1000,16 +1000,16 @@ static int nfs_size_need_update(const st
41672 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
41673 }
41674
41675-static atomic_long_t nfs_attr_generation_counter;
41676+static atomic_long_unchecked_t nfs_attr_generation_counter;
41677
41678 static unsigned long nfs_read_attr_generation_counter(void)
41679 {
41680- return atomic_long_read(&nfs_attr_generation_counter);
41681+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
41682 }
41683
41684 unsigned long nfs_inc_attr_generation_counter(void)
41685 {
41686- return atomic_long_inc_return(&nfs_attr_generation_counter);
41687+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
41688 }
41689
41690 void nfs_fattr_init(struct nfs_fattr *fattr)
41691diff -urNp linux-3.0.4/fs/nfsd/nfs4state.c linux-3.0.4/fs/nfsd/nfs4state.c
41692--- linux-3.0.4/fs/nfsd/nfs4state.c 2011-09-02 18:11:21.000000000 -0400
41693+++ linux-3.0.4/fs/nfsd/nfs4state.c 2011-08-23 21:48:14.000000000 -0400
41694@@ -3794,6 +3794,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
41695 unsigned int strhashval;
41696 int err;
41697
41698+ pax_track_stack();
41699+
41700 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
41701 (long long) lock->lk_offset,
41702 (long long) lock->lk_length);
41703diff -urNp linux-3.0.4/fs/nfsd/nfs4xdr.c linux-3.0.4/fs/nfsd/nfs4xdr.c
41704--- linux-3.0.4/fs/nfsd/nfs4xdr.c 2011-07-21 22:17:23.000000000 -0400
41705+++ linux-3.0.4/fs/nfsd/nfs4xdr.c 2011-08-23 21:48:14.000000000 -0400
41706@@ -1788,6 +1788,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
41707 .dentry = dentry,
41708 };
41709
41710+ pax_track_stack();
41711+
41712 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
41713 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
41714 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
41715diff -urNp linux-3.0.4/fs/nfsd/vfs.c linux-3.0.4/fs/nfsd/vfs.c
41716--- linux-3.0.4/fs/nfsd/vfs.c 2011-07-21 22:17:23.000000000 -0400
41717+++ linux-3.0.4/fs/nfsd/vfs.c 2011-08-23 21:47:56.000000000 -0400
41718@@ -896,7 +896,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
41719 } else {
41720 oldfs = get_fs();
41721 set_fs(KERNEL_DS);
41722- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
41723+ host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
41724 set_fs(oldfs);
41725 }
41726
41727@@ -1000,7 +1000,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
41728
41729 /* Write the data. */
41730 oldfs = get_fs(); set_fs(KERNEL_DS);
41731- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
41732+ host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
41733 set_fs(oldfs);
41734 if (host_err < 0)
41735 goto out_nfserr;
41736@@ -1535,7 +1535,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
41737 */
41738
41739 oldfs = get_fs(); set_fs(KERNEL_DS);
41740- host_err = inode->i_op->readlink(dentry, buf, *lenp);
41741+ host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
41742 set_fs(oldfs);
41743
41744 if (host_err < 0)
41745diff -urNp linux-3.0.4/fs/notify/fanotify/fanotify_user.c linux-3.0.4/fs/notify/fanotify/fanotify_user.c
41746--- linux-3.0.4/fs/notify/fanotify/fanotify_user.c 2011-07-21 22:17:23.000000000 -0400
41747+++ linux-3.0.4/fs/notify/fanotify/fanotify_user.c 2011-08-23 21:48:14.000000000 -0400
41748@@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct
41749 goto out_close_fd;
41750
41751 ret = -EFAULT;
41752- if (copy_to_user(buf, &fanotify_event_metadata,
41753+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
41754+ copy_to_user(buf, &fanotify_event_metadata,
41755 fanotify_event_metadata.event_len))
41756 goto out_kill_access_response;
41757
41758diff -urNp linux-3.0.4/fs/notify/notification.c linux-3.0.4/fs/notify/notification.c
41759--- linux-3.0.4/fs/notify/notification.c 2011-07-21 22:17:23.000000000 -0400
41760+++ linux-3.0.4/fs/notify/notification.c 2011-08-23 21:47:56.000000000 -0400
41761@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
41762 * get set to 0 so it will never get 'freed'
41763 */
41764 static struct fsnotify_event *q_overflow_event;
41765-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
41766+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
41767
41768 /**
41769 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
41770@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
41771 */
41772 u32 fsnotify_get_cookie(void)
41773 {
41774- return atomic_inc_return(&fsnotify_sync_cookie);
41775+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
41776 }
41777 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
41778
41779diff -urNp linux-3.0.4/fs/ntfs/dir.c linux-3.0.4/fs/ntfs/dir.c
41780--- linux-3.0.4/fs/ntfs/dir.c 2011-07-21 22:17:23.000000000 -0400
41781+++ linux-3.0.4/fs/ntfs/dir.c 2011-08-23 21:47:56.000000000 -0400
41782@@ -1329,7 +1329,7 @@ find_next_index_buffer:
41783 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
41784 ~(s64)(ndir->itype.index.block_size - 1)));
41785 /* Bounds checks. */
41786- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
41787+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
41788 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
41789 "inode 0x%lx or driver bug.", vdir->i_ino);
41790 goto err_out;
41791diff -urNp linux-3.0.4/fs/ntfs/file.c linux-3.0.4/fs/ntfs/file.c
41792--- linux-3.0.4/fs/ntfs/file.c 2011-07-21 22:17:23.000000000 -0400
41793+++ linux-3.0.4/fs/ntfs/file.c 2011-08-23 21:47:56.000000000 -0400
41794@@ -2222,6 +2222,6 @@ const struct inode_operations ntfs_file_
41795 #endif /* NTFS_RW */
41796 };
41797
41798-const struct file_operations ntfs_empty_file_ops = {};
41799+const struct file_operations ntfs_empty_file_ops __read_only;
41800
41801-const struct inode_operations ntfs_empty_inode_ops = {};
41802+const struct inode_operations ntfs_empty_inode_ops __read_only;
41803diff -urNp linux-3.0.4/fs/ocfs2/localalloc.c linux-3.0.4/fs/ocfs2/localalloc.c
41804--- linux-3.0.4/fs/ocfs2/localalloc.c 2011-07-21 22:17:23.000000000 -0400
41805+++ linux-3.0.4/fs/ocfs2/localalloc.c 2011-08-23 21:47:56.000000000 -0400
41806@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_windo
41807 goto bail;
41808 }
41809
41810- atomic_inc(&osb->alloc_stats.moves);
41811+ atomic_inc_unchecked(&osb->alloc_stats.moves);
41812
41813 bail:
41814 if (handle)
41815diff -urNp linux-3.0.4/fs/ocfs2/namei.c linux-3.0.4/fs/ocfs2/namei.c
41816--- linux-3.0.4/fs/ocfs2/namei.c 2011-07-21 22:17:23.000000000 -0400
41817+++ linux-3.0.4/fs/ocfs2/namei.c 2011-08-23 21:48:14.000000000 -0400
41818@@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *ol
41819 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
41820 struct ocfs2_dir_lookup_result target_insert = { NULL, };
41821
41822+ pax_track_stack();
41823+
41824 /* At some point it might be nice to break this function up a
41825 * bit. */
41826
41827diff -urNp linux-3.0.4/fs/ocfs2/ocfs2.h linux-3.0.4/fs/ocfs2/ocfs2.h
41828--- linux-3.0.4/fs/ocfs2/ocfs2.h 2011-07-21 22:17:23.000000000 -0400
41829+++ linux-3.0.4/fs/ocfs2/ocfs2.h 2011-08-23 21:47:56.000000000 -0400
41830@@ -235,11 +235,11 @@ enum ocfs2_vol_state
41831
41832 struct ocfs2_alloc_stats
41833 {
41834- atomic_t moves;
41835- atomic_t local_data;
41836- atomic_t bitmap_data;
41837- atomic_t bg_allocs;
41838- atomic_t bg_extends;
41839+ atomic_unchecked_t moves;
41840+ atomic_unchecked_t local_data;
41841+ atomic_unchecked_t bitmap_data;
41842+ atomic_unchecked_t bg_allocs;
41843+ atomic_unchecked_t bg_extends;
41844 };
41845
41846 enum ocfs2_local_alloc_state
41847diff -urNp linux-3.0.4/fs/ocfs2/suballoc.c linux-3.0.4/fs/ocfs2/suballoc.c
41848--- linux-3.0.4/fs/ocfs2/suballoc.c 2011-07-21 22:17:23.000000000 -0400
41849+++ linux-3.0.4/fs/ocfs2/suballoc.c 2011-08-23 21:47:56.000000000 -0400
41850@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(s
41851 mlog_errno(status);
41852 goto bail;
41853 }
41854- atomic_inc(&osb->alloc_stats.bg_extends);
41855+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
41856
41857 /* You should never ask for this much metadata */
41858 BUG_ON(bits_wanted >
41859@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handl
41860 mlog_errno(status);
41861 goto bail;
41862 }
41863- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
41864+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
41865
41866 *suballoc_loc = res.sr_bg_blkno;
41867 *suballoc_bit_start = res.sr_bit_offset;
41868@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_
41869 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
41870 res->sr_bits);
41871
41872- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
41873+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
41874
41875 BUG_ON(res->sr_bits != 1);
41876
41877@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *hand
41878 mlog_errno(status);
41879 goto bail;
41880 }
41881- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
41882+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
41883
41884 BUG_ON(res.sr_bits != 1);
41885
41886@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *han
41887 cluster_start,
41888 num_clusters);
41889 if (!status)
41890- atomic_inc(&osb->alloc_stats.local_data);
41891+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
41892 } else {
41893 if (min_clusters > (osb->bitmap_cpg - 1)) {
41894 /* The only paths asking for contiguousness
41895@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *han
41896 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
41897 res.sr_bg_blkno,
41898 res.sr_bit_offset);
41899- atomic_inc(&osb->alloc_stats.bitmap_data);
41900+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
41901 *num_clusters = res.sr_bits;
41902 }
41903 }
41904diff -urNp linux-3.0.4/fs/ocfs2/super.c linux-3.0.4/fs/ocfs2/super.c
41905--- linux-3.0.4/fs/ocfs2/super.c 2011-07-21 22:17:23.000000000 -0400
41906+++ linux-3.0.4/fs/ocfs2/super.c 2011-08-23 21:47:56.000000000 -0400
41907@@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
41908 "%10s => GlobalAllocs: %d LocalAllocs: %d "
41909 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
41910 "Stats",
41911- atomic_read(&osb->alloc_stats.bitmap_data),
41912- atomic_read(&osb->alloc_stats.local_data),
41913- atomic_read(&osb->alloc_stats.bg_allocs),
41914- atomic_read(&osb->alloc_stats.moves),
41915- atomic_read(&osb->alloc_stats.bg_extends));
41916+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
41917+ atomic_read_unchecked(&osb->alloc_stats.local_data),
41918+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
41919+ atomic_read_unchecked(&osb->alloc_stats.moves),
41920+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
41921
41922 out += snprintf(buf + out, len - out,
41923 "%10s => State: %u Descriptor: %llu Size: %u bits "
41924@@ -2112,11 +2112,11 @@ static int ocfs2_initialize_super(struct
41925 spin_lock_init(&osb->osb_xattr_lock);
41926 ocfs2_init_steal_slots(osb);
41927
41928- atomic_set(&osb->alloc_stats.moves, 0);
41929- atomic_set(&osb->alloc_stats.local_data, 0);
41930- atomic_set(&osb->alloc_stats.bitmap_data, 0);
41931- atomic_set(&osb->alloc_stats.bg_allocs, 0);
41932- atomic_set(&osb->alloc_stats.bg_extends, 0);
41933+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
41934+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
41935+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
41936+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
41937+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
41938
41939 /* Copy the blockcheck stats from the superblock probe */
41940 osb->osb_ecc_stats = *stats;
41941diff -urNp linux-3.0.4/fs/ocfs2/symlink.c linux-3.0.4/fs/ocfs2/symlink.c
41942--- linux-3.0.4/fs/ocfs2/symlink.c 2011-07-21 22:17:23.000000000 -0400
41943+++ linux-3.0.4/fs/ocfs2/symlink.c 2011-08-23 21:47:56.000000000 -0400
41944@@ -142,7 +142,7 @@ bail:
41945
41946 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
41947 {
41948- char *link = nd_get_link(nd);
41949+ const char *link = nd_get_link(nd);
41950 if (!IS_ERR(link))
41951 kfree(link);
41952 }
41953diff -urNp linux-3.0.4/fs/open.c linux-3.0.4/fs/open.c
41954--- linux-3.0.4/fs/open.c 2011-07-21 22:17:23.000000000 -0400
41955+++ linux-3.0.4/fs/open.c 2011-09-14 09:16:46.000000000 -0400
41956@@ -112,6 +112,10 @@ static long do_sys_truncate(const char _
41957 error = locks_verify_truncate(inode, NULL, length);
41958 if (!error)
41959 error = security_path_truncate(&path);
41960+
41961+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
41962+ error = -EACCES;
41963+
41964 if (!error)
41965 error = do_truncate(path.dentry, length, 0, NULL);
41966
41967@@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
41968 if (__mnt_is_readonly(path.mnt))
41969 res = -EROFS;
41970
41971+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
41972+ res = -EACCES;
41973+
41974 out_path_release:
41975 path_put(&path);
41976 out:
41977@@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user
41978 if (error)
41979 goto dput_and_out;
41980
41981+ gr_log_chdir(path.dentry, path.mnt);
41982+
41983 set_fs_pwd(current->fs, &path);
41984
41985 dput_and_out:
41986@@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
41987 goto out_putf;
41988
41989 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
41990+
41991+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
41992+ error = -EPERM;
41993+
41994+ if (!error)
41995+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
41996+
41997 if (!error)
41998 set_fs_pwd(current->fs, &file->f_path);
41999 out_putf:
42000@@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __use
42001 if (error)
42002 goto dput_and_out;
42003
42004+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
42005+ goto dput_and_out;
42006+
42007 set_fs_root(current->fs, &path);
42008+
42009+ gr_handle_chroot_chdir(&path);
42010+
42011 error = 0;
42012 dput_and_out:
42013 path_put(&path);
42014@@ -466,12 +488,25 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
42015 err = mnt_want_write_file(file);
42016 if (err)
42017 goto out_putf;
42018+
42019 mutex_lock(&inode->i_mutex);
42020+
42021+ if (!gr_acl_handle_fchmod(dentry, file->f_vfsmnt, mode)) {
42022+ err = -EACCES;
42023+ goto out_unlock;
42024+ }
42025+
42026 err = security_path_chmod(dentry, file->f_vfsmnt, mode);
42027 if (err)
42028 goto out_unlock;
42029 if (mode == (mode_t) -1)
42030 mode = inode->i_mode;
42031+
42032+ if (gr_handle_chroot_chmod(dentry, file->f_vfsmnt, mode)) {
42033+ err = -EACCES;
42034+ goto out_unlock;
42035+ }
42036+
42037 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
42038 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
42039 err = notify_change(dentry, &newattrs);
42040@@ -499,12 +534,25 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
42041 error = mnt_want_write(path.mnt);
42042 if (error)
42043 goto dput_and_out;
42044+
42045 mutex_lock(&inode->i_mutex);
42046+
42047+ if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
42048+ error = -EACCES;
42049+ goto out_unlock;
42050+ }
42051+
42052 error = security_path_chmod(path.dentry, path.mnt, mode);
42053 if (error)
42054 goto out_unlock;
42055 if (mode == (mode_t) -1)
42056 mode = inode->i_mode;
42057+
42058+ if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
42059+ error = -EACCES;
42060+ goto out_unlock;
42061+ }
42062+
42063 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
42064 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
42065 error = notify_change(path.dentry, &newattrs);
42066@@ -528,6 +576,9 @@ static int chown_common(struct path *pat
42067 int error;
42068 struct iattr newattrs;
42069
42070+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
42071+ return -EACCES;
42072+
42073 newattrs.ia_valid = ATTR_CTIME;
42074 if (user != (uid_t) -1) {
42075 newattrs.ia_valid |= ATTR_UID;
42076@@ -998,7 +1049,10 @@ long do_sys_open(int dfd, const char __u
42077 if (!IS_ERR(tmp)) {
42078 fd = get_unused_fd_flags(flags);
42079 if (fd >= 0) {
42080- struct file *f = do_filp_open(dfd, tmp, &op, lookup);
42081+ struct file *f;
42082+ /* don't allow to be set by userland */
42083+ flags &= ~FMODE_GREXEC;
42084+ f = do_filp_open(dfd, tmp, &op, lookup);
42085 if (IS_ERR(f)) {
42086 put_unused_fd(fd);
42087 fd = PTR_ERR(f);
42088diff -urNp linux-3.0.4/fs/partitions/ldm.c linux-3.0.4/fs/partitions/ldm.c
42089--- linux-3.0.4/fs/partitions/ldm.c 2011-07-21 22:17:23.000000000 -0400
42090+++ linux-3.0.4/fs/partitions/ldm.c 2011-08-23 21:48:14.000000000 -0400
42091@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
42092 ldm_error ("A VBLK claims to have %d parts.", num);
42093 return false;
42094 }
42095+
42096 if (rec >= num) {
42097 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
42098 return false;
42099@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
42100 goto found;
42101 }
42102
42103- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
42104+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
42105 if (!f) {
42106 ldm_crit ("Out of memory.");
42107 return false;
42108diff -urNp linux-3.0.4/fs/pipe.c linux-3.0.4/fs/pipe.c
42109--- linux-3.0.4/fs/pipe.c 2011-07-21 22:17:23.000000000 -0400
42110+++ linux-3.0.4/fs/pipe.c 2011-08-23 21:48:14.000000000 -0400
42111@@ -420,9 +420,9 @@ redo:
42112 }
42113 if (bufs) /* More to do? */
42114 continue;
42115- if (!pipe->writers)
42116+ if (!atomic_read(&pipe->writers))
42117 break;
42118- if (!pipe->waiting_writers) {
42119+ if (!atomic_read(&pipe->waiting_writers)) {
42120 /* syscall merging: Usually we must not sleep
42121 * if O_NONBLOCK is set, or if we got some data.
42122 * But if a writer sleeps in kernel space, then
42123@@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const str
42124 mutex_lock(&inode->i_mutex);
42125 pipe = inode->i_pipe;
42126
42127- if (!pipe->readers) {
42128+ if (!atomic_read(&pipe->readers)) {
42129 send_sig(SIGPIPE, current, 0);
42130 ret = -EPIPE;
42131 goto out;
42132@@ -530,7 +530,7 @@ redo1:
42133 for (;;) {
42134 int bufs;
42135
42136- if (!pipe->readers) {
42137+ if (!atomic_read(&pipe->readers)) {
42138 send_sig(SIGPIPE, current, 0);
42139 if (!ret)
42140 ret = -EPIPE;
42141@@ -616,9 +616,9 @@ redo2:
42142 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
42143 do_wakeup = 0;
42144 }
42145- pipe->waiting_writers++;
42146+ atomic_inc(&pipe->waiting_writers);
42147 pipe_wait(pipe);
42148- pipe->waiting_writers--;
42149+ atomic_dec(&pipe->waiting_writers);
42150 }
42151 out:
42152 mutex_unlock(&inode->i_mutex);
42153@@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table
42154 mask = 0;
42155 if (filp->f_mode & FMODE_READ) {
42156 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
42157- if (!pipe->writers && filp->f_version != pipe->w_counter)
42158+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
42159 mask |= POLLHUP;
42160 }
42161
42162@@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table
42163 * Most Unices do not set POLLERR for FIFOs but on Linux they
42164 * behave exactly like pipes for poll().
42165 */
42166- if (!pipe->readers)
42167+ if (!atomic_read(&pipe->readers))
42168 mask |= POLLERR;
42169 }
42170
42171@@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int de
42172
42173 mutex_lock(&inode->i_mutex);
42174 pipe = inode->i_pipe;
42175- pipe->readers -= decr;
42176- pipe->writers -= decw;
42177+ atomic_sub(decr, &pipe->readers);
42178+ atomic_sub(decw, &pipe->writers);
42179
42180- if (!pipe->readers && !pipe->writers) {
42181+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
42182 free_pipe_info(inode);
42183 } else {
42184 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
42185@@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, stru
42186
42187 if (inode->i_pipe) {
42188 ret = 0;
42189- inode->i_pipe->readers++;
42190+ atomic_inc(&inode->i_pipe->readers);
42191 }
42192
42193 mutex_unlock(&inode->i_mutex);
42194@@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, str
42195
42196 if (inode->i_pipe) {
42197 ret = 0;
42198- inode->i_pipe->writers++;
42199+ atomic_inc(&inode->i_pipe->writers);
42200 }
42201
42202 mutex_unlock(&inode->i_mutex);
42203@@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, stru
42204 if (inode->i_pipe) {
42205 ret = 0;
42206 if (filp->f_mode & FMODE_READ)
42207- inode->i_pipe->readers++;
42208+ atomic_inc(&inode->i_pipe->readers);
42209 if (filp->f_mode & FMODE_WRITE)
42210- inode->i_pipe->writers++;
42211+ atomic_inc(&inode->i_pipe->writers);
42212 }
42213
42214 mutex_unlock(&inode->i_mutex);
42215@@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
42216 inode->i_pipe = NULL;
42217 }
42218
42219-static struct vfsmount *pipe_mnt __read_mostly;
42220+struct vfsmount *pipe_mnt __read_mostly;
42221
42222 /*
42223 * pipefs_dname() is called from d_path().
42224@@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(voi
42225 goto fail_iput;
42226 inode->i_pipe = pipe;
42227
42228- pipe->readers = pipe->writers = 1;
42229+ atomic_set(&pipe->readers, 1);
42230+ atomic_set(&pipe->writers, 1);
42231 inode->i_fop = &rdwr_pipefifo_fops;
42232
42233 /*
42234diff -urNp linux-3.0.4/fs/proc/array.c linux-3.0.4/fs/proc/array.c
42235--- linux-3.0.4/fs/proc/array.c 2011-07-21 22:17:23.000000000 -0400
42236+++ linux-3.0.4/fs/proc/array.c 2011-08-23 21:48:14.000000000 -0400
42237@@ -60,6 +60,7 @@
42238 #include <linux/tty.h>
42239 #include <linux/string.h>
42240 #include <linux/mman.h>
42241+#include <linux/grsecurity.h>
42242 #include <linux/proc_fs.h>
42243 #include <linux/ioport.h>
42244 #include <linux/uaccess.h>
42245@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq
42246 seq_putc(m, '\n');
42247 }
42248
42249+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42250+static inline void task_pax(struct seq_file *m, struct task_struct *p)
42251+{
42252+ if (p->mm)
42253+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
42254+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
42255+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
42256+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
42257+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
42258+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
42259+ else
42260+ seq_printf(m, "PaX:\t-----\n");
42261+}
42262+#endif
42263+
42264 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
42265 struct pid *pid, struct task_struct *task)
42266 {
42267@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m,
42268 task_cpus_allowed(m, task);
42269 cpuset_task_status_allowed(m, task);
42270 task_context_switch_counts(m, task);
42271+
42272+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42273+ task_pax(m, task);
42274+#endif
42275+
42276+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
42277+ task_grsec_rbac(m, task);
42278+#endif
42279+
42280 return 0;
42281 }
42282
42283+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42284+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
42285+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
42286+ _mm->pax_flags & MF_PAX_SEGMEXEC))
42287+#endif
42288+
42289 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
42290 struct pid *pid, struct task_struct *task, int whole)
42291 {
42292@@ -375,9 +406,11 @@ static int do_task_stat(struct seq_file
42293 cputime_t cutime, cstime, utime, stime;
42294 cputime_t cgtime, gtime;
42295 unsigned long rsslim = 0;
42296- char tcomm[sizeof(task->comm)];
42297+ char tcomm[sizeof(task->comm)] = { 0 };
42298 unsigned long flags;
42299
42300+ pax_track_stack();
42301+
42302 state = *get_task_state(task);
42303 vsize = eip = esp = 0;
42304 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
42305@@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file
42306 gtime = task->gtime;
42307 }
42308
42309+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42310+ if (PAX_RAND_FLAGS(mm)) {
42311+ eip = 0;
42312+ esp = 0;
42313+ wchan = 0;
42314+ }
42315+#endif
42316+#ifdef CONFIG_GRKERNSEC_HIDESYM
42317+ wchan = 0;
42318+ eip =0;
42319+ esp =0;
42320+#endif
42321+
42322 /* scale priority and nice values from timeslices to -20..20 */
42323 /* to make it look like a "normal" Unix priority/nice value */
42324 priority = task_prio(task);
42325@@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file
42326 vsize,
42327 mm ? get_mm_rss(mm) : 0,
42328 rsslim,
42329+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42330+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
42331+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
42332+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
42333+#else
42334 mm ? (permitted ? mm->start_code : 1) : 0,
42335 mm ? (permitted ? mm->end_code : 1) : 0,
42336 (permitted && mm) ? mm->start_stack : 0,
42337+#endif
42338 esp,
42339 eip,
42340 /* The signal information here is obsolete.
42341@@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, s
42342
42343 return 0;
42344 }
42345+
42346+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42347+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
42348+{
42349+ u32 curr_ip = 0;
42350+ unsigned long flags;
42351+
42352+ if (lock_task_sighand(task, &flags)) {
42353+ curr_ip = task->signal->curr_ip;
42354+ unlock_task_sighand(task, &flags);
42355+ }
42356+
42357+ return sprintf(buffer, "%pI4\n", &curr_ip);
42358+}
42359+#endif
42360diff -urNp linux-3.0.4/fs/proc/base.c linux-3.0.4/fs/proc/base.c
42361--- linux-3.0.4/fs/proc/base.c 2011-09-02 18:11:21.000000000 -0400
42362+++ linux-3.0.4/fs/proc/base.c 2011-09-13 14:50:28.000000000 -0400
42363@@ -107,6 +107,22 @@ struct pid_entry {
42364 union proc_op op;
42365 };
42366
42367+struct getdents_callback {
42368+ struct linux_dirent __user * current_dir;
42369+ struct linux_dirent __user * previous;
42370+ struct file * file;
42371+ int count;
42372+ int error;
42373+};
42374+
42375+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
42376+ loff_t offset, u64 ino, unsigned int d_type)
42377+{
42378+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
42379+ buf->error = -EINVAL;
42380+ return 0;
42381+}
42382+
42383 #define NOD(NAME, MODE, IOP, FOP, OP) { \
42384 .name = (NAME), \
42385 .len = sizeof(NAME) - 1, \
42386@@ -209,6 +225,9 @@ static struct mm_struct *__check_mem_per
42387 if (task == current)
42388 return mm;
42389
42390+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
42391+ return ERR_PTR(-EPERM);
42392+
42393 /*
42394 * If current is actively ptrace'ing, and would also be
42395 * permitted to freshly attach with ptrace now, permit it.
42396@@ -282,6 +301,9 @@ static int proc_pid_cmdline(struct task_
42397 if (!mm->arg_end)
42398 goto out_mm; /* Shh! No looking before we're done */
42399
42400+ if (gr_acl_handle_procpidmem(task))
42401+ goto out_mm;
42402+
42403 len = mm->arg_end - mm->arg_start;
42404
42405 if (len > PAGE_SIZE)
42406@@ -309,12 +331,28 @@ out:
42407 return res;
42408 }
42409
42410+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42411+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
42412+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
42413+ _mm->pax_flags & MF_PAX_SEGMEXEC))
42414+#endif
42415+
42416 static int proc_pid_auxv(struct task_struct *task, char *buffer)
42417 {
42418 struct mm_struct *mm = mm_for_maps(task);
42419 int res = PTR_ERR(mm);
42420 if (mm && !IS_ERR(mm)) {
42421 unsigned int nwords = 0;
42422+
42423+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42424+ /* allow if we're currently ptracing this task */
42425+ if (PAX_RAND_FLAGS(mm) &&
42426+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
42427+ mmput(mm);
42428+ return 0;
42429+ }
42430+#endif
42431+
42432 do {
42433 nwords += 2;
42434 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
42435@@ -328,7 +366,7 @@ static int proc_pid_auxv(struct task_str
42436 }
42437
42438
42439-#ifdef CONFIG_KALLSYMS
42440+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42441 /*
42442 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
42443 * Returns the resolved symbol. If that fails, simply return the address.
42444@@ -367,7 +405,7 @@ static void unlock_trace(struct task_str
42445 mutex_unlock(&task->signal->cred_guard_mutex);
42446 }
42447
42448-#ifdef CONFIG_STACKTRACE
42449+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42450
42451 #define MAX_STACK_TRACE_DEPTH 64
42452
42453@@ -558,7 +596,7 @@ static int proc_pid_limits(struct task_s
42454 return count;
42455 }
42456
42457-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42458+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42459 static int proc_pid_syscall(struct task_struct *task, char *buffer)
42460 {
42461 long nr;
42462@@ -587,7 +625,7 @@ static int proc_pid_syscall(struct task_
42463 /************************************************************************/
42464
42465 /* permission checks */
42466-static int proc_fd_access_allowed(struct inode *inode)
42467+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
42468 {
42469 struct task_struct *task;
42470 int allowed = 0;
42471@@ -597,7 +635,10 @@ static int proc_fd_access_allowed(struct
42472 */
42473 task = get_proc_task(inode);
42474 if (task) {
42475- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
42476+ if (log)
42477+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
42478+ else
42479+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
42480 put_task_struct(task);
42481 }
42482 return allowed;
42483@@ -978,6 +1019,9 @@ static ssize_t environ_read(struct file
42484 if (!task)
42485 goto out_no_task;
42486
42487+ if (gr_acl_handle_procpidmem(task))
42488+ goto out;
42489+
42490 ret = -ENOMEM;
42491 page = (char *)__get_free_page(GFP_TEMPORARY);
42492 if (!page)
42493@@ -1614,7 +1658,7 @@ static void *proc_pid_follow_link(struct
42494 path_put(&nd->path);
42495
42496 /* Are we allowed to snoop on the tasks file descriptors? */
42497- if (!proc_fd_access_allowed(inode))
42498+ if (!proc_fd_access_allowed(inode,0))
42499 goto out;
42500
42501 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
42502@@ -1653,8 +1697,18 @@ static int proc_pid_readlink(struct dent
42503 struct path path;
42504
42505 /* Are we allowed to snoop on the tasks file descriptors? */
42506- if (!proc_fd_access_allowed(inode))
42507- goto out;
42508+ /* logging this is needed for learning on chromium to work properly,
42509+ but we don't want to flood the logs from 'ps' which does a readlink
42510+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
42511+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
42512+ */
42513+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
42514+ if (!proc_fd_access_allowed(inode,0))
42515+ goto out;
42516+ } else {
42517+ if (!proc_fd_access_allowed(inode,1))
42518+ goto out;
42519+ }
42520
42521 error = PROC_I(inode)->op.proc_get_link(inode, &path);
42522 if (error)
42523@@ -1719,7 +1773,11 @@ struct inode *proc_pid_make_inode(struct
42524 rcu_read_lock();
42525 cred = __task_cred(task);
42526 inode->i_uid = cred->euid;
42527+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42528+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42529+#else
42530 inode->i_gid = cred->egid;
42531+#endif
42532 rcu_read_unlock();
42533 }
42534 security_task_to_inode(task, inode);
42535@@ -1737,6 +1795,9 @@ int pid_getattr(struct vfsmount *mnt, st
42536 struct inode *inode = dentry->d_inode;
42537 struct task_struct *task;
42538 const struct cred *cred;
42539+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42540+ const struct cred *tmpcred = current_cred();
42541+#endif
42542
42543 generic_fillattr(inode, stat);
42544
42545@@ -1744,13 +1805,41 @@ int pid_getattr(struct vfsmount *mnt, st
42546 stat->uid = 0;
42547 stat->gid = 0;
42548 task = pid_task(proc_pid(inode), PIDTYPE_PID);
42549+
42550+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
42551+ rcu_read_unlock();
42552+ return -ENOENT;
42553+ }
42554+
42555 if (task) {
42556+ cred = __task_cred(task);
42557+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42558+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
42559+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42560+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
42561+#endif
42562+ ) {
42563+#endif
42564 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
42565+#ifdef CONFIG_GRKERNSEC_PROC_USER
42566+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
42567+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42568+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
42569+#endif
42570 task_dumpable(task)) {
42571- cred = __task_cred(task);
42572 stat->uid = cred->euid;
42573+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42574+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
42575+#else
42576 stat->gid = cred->egid;
42577+#endif
42578 }
42579+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42580+ } else {
42581+ rcu_read_unlock();
42582+ return -ENOENT;
42583+ }
42584+#endif
42585 }
42586 rcu_read_unlock();
42587 return 0;
42588@@ -1787,11 +1876,20 @@ int pid_revalidate(struct dentry *dentry
42589
42590 if (task) {
42591 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
42592+#ifdef CONFIG_GRKERNSEC_PROC_USER
42593+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
42594+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42595+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
42596+#endif
42597 task_dumpable(task)) {
42598 rcu_read_lock();
42599 cred = __task_cred(task);
42600 inode->i_uid = cred->euid;
42601+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42602+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42603+#else
42604 inode->i_gid = cred->egid;
42605+#endif
42606 rcu_read_unlock();
42607 } else {
42608 inode->i_uid = 0;
42609@@ -1909,7 +2007,8 @@ static int proc_fd_info(struct inode *in
42610 int fd = proc_fd(inode);
42611
42612 if (task) {
42613- files = get_files_struct(task);
42614+ if (!gr_acl_handle_procpidmem(task))
42615+ files = get_files_struct(task);
42616 put_task_struct(task);
42617 }
42618 if (files) {
42619@@ -2169,11 +2268,21 @@ static const struct file_operations proc
42620 */
42621 static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
42622 {
42623+ struct task_struct *task;
42624 int rv = generic_permission(inode, mask, flags, NULL);
42625- if (rv == 0)
42626- return 0;
42627+
42628 if (task_pid(current) == proc_pid(inode))
42629 rv = 0;
42630+
42631+ task = get_proc_task(inode);
42632+ if (task == NULL)
42633+ return rv;
42634+
42635+ if (gr_acl_handle_procpidmem(task))
42636+ rv = -EACCES;
42637+
42638+ put_task_struct(task);
42639+
42640 return rv;
42641 }
42642
42643@@ -2283,6 +2392,9 @@ static struct dentry *proc_pident_lookup
42644 if (!task)
42645 goto out_no_task;
42646
42647+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42648+ goto out;
42649+
42650 /*
42651 * Yes, it does not scale. And it should not. Don't add
42652 * new entries into /proc/<tgid>/ without very good reasons.
42653@@ -2327,6 +2439,9 @@ static int proc_pident_readdir(struct fi
42654 if (!task)
42655 goto out_no_task;
42656
42657+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42658+ goto out;
42659+
42660 ret = 0;
42661 i = filp->f_pos;
42662 switch (i) {
42663@@ -2597,7 +2712,7 @@ static void *proc_self_follow_link(struc
42664 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
42665 void *cookie)
42666 {
42667- char *s = nd_get_link(nd);
42668+ const char *s = nd_get_link(nd);
42669 if (!IS_ERR(s))
42670 __putname(s);
42671 }
42672@@ -2795,7 +2910,7 @@ static const struct pid_entry tgid_base_
42673 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
42674 #endif
42675 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
42676-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42677+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42678 INF("syscall", S_IRUGO, proc_pid_syscall),
42679 #endif
42680 INF("cmdline", S_IRUGO, proc_pid_cmdline),
42681@@ -2820,10 +2935,10 @@ static const struct pid_entry tgid_base_
42682 #ifdef CONFIG_SECURITY
42683 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
42684 #endif
42685-#ifdef CONFIG_KALLSYMS
42686+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42687 INF("wchan", S_IRUGO, proc_pid_wchan),
42688 #endif
42689-#ifdef CONFIG_STACKTRACE
42690+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42691 ONE("stack", S_IRUGO, proc_pid_stack),
42692 #endif
42693 #ifdef CONFIG_SCHEDSTATS
42694@@ -2857,6 +2972,9 @@ static const struct pid_entry tgid_base_
42695 #ifdef CONFIG_HARDWALL
42696 INF("hardwall", S_IRUGO, proc_pid_hardwall),
42697 #endif
42698+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42699+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
42700+#endif
42701 };
42702
42703 static int proc_tgid_base_readdir(struct file * filp,
42704@@ -2982,7 +3100,14 @@ static struct dentry *proc_pid_instantia
42705 if (!inode)
42706 goto out;
42707
42708+#ifdef CONFIG_GRKERNSEC_PROC_USER
42709+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
42710+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42711+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42712+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
42713+#else
42714 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
42715+#endif
42716 inode->i_op = &proc_tgid_base_inode_operations;
42717 inode->i_fop = &proc_tgid_base_operations;
42718 inode->i_flags|=S_IMMUTABLE;
42719@@ -3024,7 +3149,11 @@ struct dentry *proc_pid_lookup(struct in
42720 if (!task)
42721 goto out;
42722
42723+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42724+ goto out_put_task;
42725+
42726 result = proc_pid_instantiate(dir, dentry, task, NULL);
42727+out_put_task:
42728 put_task_struct(task);
42729 out:
42730 return result;
42731@@ -3089,6 +3218,11 @@ int proc_pid_readdir(struct file * filp,
42732 {
42733 unsigned int nr;
42734 struct task_struct *reaper;
42735+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42736+ const struct cred *tmpcred = current_cred();
42737+ const struct cred *itercred;
42738+#endif
42739+ filldir_t __filldir = filldir;
42740 struct tgid_iter iter;
42741 struct pid_namespace *ns;
42742
42743@@ -3112,8 +3246,27 @@ int proc_pid_readdir(struct file * filp,
42744 for (iter = next_tgid(ns, iter);
42745 iter.task;
42746 iter.tgid += 1, iter = next_tgid(ns, iter)) {
42747+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42748+ rcu_read_lock();
42749+ itercred = __task_cred(iter.task);
42750+#endif
42751+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
42752+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42753+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
42754+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42755+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
42756+#endif
42757+ )
42758+#endif
42759+ )
42760+ __filldir = &gr_fake_filldir;
42761+ else
42762+ __filldir = filldir;
42763+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42764+ rcu_read_unlock();
42765+#endif
42766 filp->f_pos = iter.tgid + TGID_OFFSET;
42767- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
42768+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
42769 put_task_struct(iter.task);
42770 goto out;
42771 }
42772@@ -3141,7 +3294,7 @@ static const struct pid_entry tid_base_s
42773 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
42774 #endif
42775 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
42776-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42777+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42778 INF("syscall", S_IRUGO, proc_pid_syscall),
42779 #endif
42780 INF("cmdline", S_IRUGO, proc_pid_cmdline),
42781@@ -3165,10 +3318,10 @@ static const struct pid_entry tid_base_s
42782 #ifdef CONFIG_SECURITY
42783 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
42784 #endif
42785-#ifdef CONFIG_KALLSYMS
42786+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42787 INF("wchan", S_IRUGO, proc_pid_wchan),
42788 #endif
42789-#ifdef CONFIG_STACKTRACE
42790+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42791 ONE("stack", S_IRUGO, proc_pid_stack),
42792 #endif
42793 #ifdef CONFIG_SCHEDSTATS
42794diff -urNp linux-3.0.4/fs/proc/cmdline.c linux-3.0.4/fs/proc/cmdline.c
42795--- linux-3.0.4/fs/proc/cmdline.c 2011-07-21 22:17:23.000000000 -0400
42796+++ linux-3.0.4/fs/proc/cmdline.c 2011-08-23 21:48:14.000000000 -0400
42797@@ -23,7 +23,11 @@ static const struct file_operations cmdl
42798
42799 static int __init proc_cmdline_init(void)
42800 {
42801+#ifdef CONFIG_GRKERNSEC_PROC_ADD
42802+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
42803+#else
42804 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
42805+#endif
42806 return 0;
42807 }
42808 module_init(proc_cmdline_init);
42809diff -urNp linux-3.0.4/fs/proc/devices.c linux-3.0.4/fs/proc/devices.c
42810--- linux-3.0.4/fs/proc/devices.c 2011-07-21 22:17:23.000000000 -0400
42811+++ linux-3.0.4/fs/proc/devices.c 2011-08-23 21:48:14.000000000 -0400
42812@@ -64,7 +64,11 @@ static const struct file_operations proc
42813
42814 static int __init proc_devices_init(void)
42815 {
42816+#ifdef CONFIG_GRKERNSEC_PROC_ADD
42817+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
42818+#else
42819 proc_create("devices", 0, NULL, &proc_devinfo_operations);
42820+#endif
42821 return 0;
42822 }
42823 module_init(proc_devices_init);
42824diff -urNp linux-3.0.4/fs/proc/inode.c linux-3.0.4/fs/proc/inode.c
42825--- linux-3.0.4/fs/proc/inode.c 2011-07-21 22:17:23.000000000 -0400
42826+++ linux-3.0.4/fs/proc/inode.c 2011-08-23 21:48:14.000000000 -0400
42827@@ -440,7 +440,11 @@ struct inode *proc_get_inode(struct supe
42828 if (de->mode) {
42829 inode->i_mode = de->mode;
42830 inode->i_uid = de->uid;
42831+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42832+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42833+#else
42834 inode->i_gid = de->gid;
42835+#endif
42836 }
42837 if (de->size)
42838 inode->i_size = de->size;
42839diff -urNp linux-3.0.4/fs/proc/internal.h linux-3.0.4/fs/proc/internal.h
42840--- linux-3.0.4/fs/proc/internal.h 2011-07-21 22:17:23.000000000 -0400
42841+++ linux-3.0.4/fs/proc/internal.h 2011-08-23 21:48:14.000000000 -0400
42842@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
42843 struct pid *pid, struct task_struct *task);
42844 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
42845 struct pid *pid, struct task_struct *task);
42846+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42847+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
42848+#endif
42849 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
42850
42851 extern const struct file_operations proc_maps_operations;
42852diff -urNp linux-3.0.4/fs/proc/Kconfig linux-3.0.4/fs/proc/Kconfig
42853--- linux-3.0.4/fs/proc/Kconfig 2011-07-21 22:17:23.000000000 -0400
42854+++ linux-3.0.4/fs/proc/Kconfig 2011-08-23 21:48:14.000000000 -0400
42855@@ -30,12 +30,12 @@ config PROC_FS
42856
42857 config PROC_KCORE
42858 bool "/proc/kcore support" if !ARM
42859- depends on PROC_FS && MMU
42860+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
42861
42862 config PROC_VMCORE
42863 bool "/proc/vmcore support"
42864- depends on PROC_FS && CRASH_DUMP
42865- default y
42866+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
42867+ default n
42868 help
42869 Exports the dump image of crashed kernel in ELF format.
42870
42871@@ -59,8 +59,8 @@ config PROC_SYSCTL
42872 limited in memory.
42873
42874 config PROC_PAGE_MONITOR
42875- default y
42876- depends on PROC_FS && MMU
42877+ default n
42878+ depends on PROC_FS && MMU && !GRKERNSEC
42879 bool "Enable /proc page monitoring" if EXPERT
42880 help
42881 Various /proc files exist to monitor process memory utilization:
42882diff -urNp linux-3.0.4/fs/proc/kcore.c linux-3.0.4/fs/proc/kcore.c
42883--- linux-3.0.4/fs/proc/kcore.c 2011-07-21 22:17:23.000000000 -0400
42884+++ linux-3.0.4/fs/proc/kcore.c 2011-08-23 21:48:14.000000000 -0400
42885@@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bu
42886 off_t offset = 0;
42887 struct kcore_list *m;
42888
42889+ pax_track_stack();
42890+
42891 /* setup ELF header */
42892 elf = (struct elfhdr *) bufp;
42893 bufp += sizeof(struct elfhdr);
42894@@ -478,9 +480,10 @@ read_kcore(struct file *file, char __use
42895 * the addresses in the elf_phdr on our list.
42896 */
42897 start = kc_offset_to_vaddr(*fpos - elf_buflen);
42898- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
42899+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
42900+ if (tsz > buflen)
42901 tsz = buflen;
42902-
42903+
42904 while (buflen) {
42905 struct kcore_list *m;
42906
42907@@ -509,20 +512,23 @@ read_kcore(struct file *file, char __use
42908 kfree(elf_buf);
42909 } else {
42910 if (kern_addr_valid(start)) {
42911- unsigned long n;
42912+ char *elf_buf;
42913+ mm_segment_t oldfs;
42914
42915- n = copy_to_user(buffer, (char *)start, tsz);
42916- /*
42917- * We cannot distingush between fault on source
42918- * and fault on destination. When this happens
42919- * we clear too and hope it will trigger the
42920- * EFAULT again.
42921- */
42922- if (n) {
42923- if (clear_user(buffer + tsz - n,
42924- n))
42925+ elf_buf = kmalloc(tsz, GFP_KERNEL);
42926+ if (!elf_buf)
42927+ return -ENOMEM;
42928+ oldfs = get_fs();
42929+ set_fs(KERNEL_DS);
42930+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
42931+ set_fs(oldfs);
42932+ if (copy_to_user(buffer, elf_buf, tsz)) {
42933+ kfree(elf_buf);
42934 return -EFAULT;
42935+ }
42936 }
42937+ set_fs(oldfs);
42938+ kfree(elf_buf);
42939 } else {
42940 if (clear_user(buffer, tsz))
42941 return -EFAULT;
42942@@ -542,6 +548,9 @@ read_kcore(struct file *file, char __use
42943
42944 static int open_kcore(struct inode *inode, struct file *filp)
42945 {
42946+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
42947+ return -EPERM;
42948+#endif
42949 if (!capable(CAP_SYS_RAWIO))
42950 return -EPERM;
42951 if (kcore_need_update)
42952diff -urNp linux-3.0.4/fs/proc/meminfo.c linux-3.0.4/fs/proc/meminfo.c
42953--- linux-3.0.4/fs/proc/meminfo.c 2011-07-21 22:17:23.000000000 -0400
42954+++ linux-3.0.4/fs/proc/meminfo.c 2011-08-23 21:48:14.000000000 -0400
42955@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
42956 unsigned long pages[NR_LRU_LISTS];
42957 int lru;
42958
42959+ pax_track_stack();
42960+
42961 /*
42962 * display in kilobytes.
42963 */
42964@@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_
42965 vmi.used >> 10,
42966 vmi.largest_chunk >> 10
42967 #ifdef CONFIG_MEMORY_FAILURE
42968- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
42969+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
42970 #endif
42971 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
42972 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
42973diff -urNp linux-3.0.4/fs/proc/nommu.c linux-3.0.4/fs/proc/nommu.c
42974--- linux-3.0.4/fs/proc/nommu.c 2011-07-21 22:17:23.000000000 -0400
42975+++ linux-3.0.4/fs/proc/nommu.c 2011-08-23 21:47:56.000000000 -0400
42976@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
42977 if (len < 1)
42978 len = 1;
42979 seq_printf(m, "%*c", len, ' ');
42980- seq_path(m, &file->f_path, "");
42981+ seq_path(m, &file->f_path, "\n\\");
42982 }
42983
42984 seq_putc(m, '\n');
42985diff -urNp linux-3.0.4/fs/proc/proc_net.c linux-3.0.4/fs/proc/proc_net.c
42986--- linux-3.0.4/fs/proc/proc_net.c 2011-07-21 22:17:23.000000000 -0400
42987+++ linux-3.0.4/fs/proc/proc_net.c 2011-08-23 21:48:14.000000000 -0400
42988@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str
42989 struct task_struct *task;
42990 struct nsproxy *ns;
42991 struct net *net = NULL;
42992+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42993+ const struct cred *cred = current_cred();
42994+#endif
42995+
42996+#ifdef CONFIG_GRKERNSEC_PROC_USER
42997+ if (cred->fsuid)
42998+ return net;
42999+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43000+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
43001+ return net;
43002+#endif
43003
43004 rcu_read_lock();
43005 task = pid_task(proc_pid(dir), PIDTYPE_PID);
43006diff -urNp linux-3.0.4/fs/proc/proc_sysctl.c linux-3.0.4/fs/proc/proc_sysctl.c
43007--- linux-3.0.4/fs/proc/proc_sysctl.c 2011-07-21 22:17:23.000000000 -0400
43008+++ linux-3.0.4/fs/proc/proc_sysctl.c 2011-08-23 21:48:14.000000000 -0400
43009@@ -8,6 +8,8 @@
43010 #include <linux/namei.h>
43011 #include "internal.h"
43012
43013+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
43014+
43015 static const struct dentry_operations proc_sys_dentry_operations;
43016 static const struct file_operations proc_sys_file_operations;
43017 static const struct inode_operations proc_sys_inode_operations;
43018@@ -111,6 +113,9 @@ static struct dentry *proc_sys_lookup(st
43019 if (!p)
43020 goto out;
43021
43022+ if (gr_handle_sysctl(p, MAY_EXEC))
43023+ goto out;
43024+
43025 err = ERR_PTR(-ENOMEM);
43026 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
43027 if (h)
43028@@ -230,6 +235,9 @@ static int scan(struct ctl_table_header
43029 if (*pos < file->f_pos)
43030 continue;
43031
43032+ if (gr_handle_sysctl(table, 0))
43033+ continue;
43034+
43035 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
43036 if (res)
43037 return res;
43038@@ -355,6 +363,9 @@ static int proc_sys_getattr(struct vfsmo
43039 if (IS_ERR(head))
43040 return PTR_ERR(head);
43041
43042+ if (table && gr_handle_sysctl(table, MAY_EXEC))
43043+ return -ENOENT;
43044+
43045 generic_fillattr(inode, stat);
43046 if (table)
43047 stat->mode = (stat->mode & S_IFMT) | table->mode;
43048diff -urNp linux-3.0.4/fs/proc/root.c linux-3.0.4/fs/proc/root.c
43049--- linux-3.0.4/fs/proc/root.c 2011-07-21 22:17:23.000000000 -0400
43050+++ linux-3.0.4/fs/proc/root.c 2011-08-23 21:48:14.000000000 -0400
43051@@ -123,7 +123,15 @@ void __init proc_root_init(void)
43052 #ifdef CONFIG_PROC_DEVICETREE
43053 proc_device_tree_init();
43054 #endif
43055+#ifdef CONFIG_GRKERNSEC_PROC_ADD
43056+#ifdef CONFIG_GRKERNSEC_PROC_USER
43057+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
43058+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43059+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
43060+#endif
43061+#else
43062 proc_mkdir("bus", NULL);
43063+#endif
43064 proc_sys_init();
43065 }
43066
43067diff -urNp linux-3.0.4/fs/proc/task_mmu.c linux-3.0.4/fs/proc/task_mmu.c
43068--- linux-3.0.4/fs/proc/task_mmu.c 2011-07-21 22:17:23.000000000 -0400
43069+++ linux-3.0.4/fs/proc/task_mmu.c 2011-08-23 21:48:14.000000000 -0400
43070@@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct
43071 "VmExe:\t%8lu kB\n"
43072 "VmLib:\t%8lu kB\n"
43073 "VmPTE:\t%8lu kB\n"
43074- "VmSwap:\t%8lu kB\n",
43075- hiwater_vm << (PAGE_SHIFT-10),
43076+ "VmSwap:\t%8lu kB\n"
43077+
43078+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
43079+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
43080+#endif
43081+
43082+ ,hiwater_vm << (PAGE_SHIFT-10),
43083 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
43084 mm->locked_vm << (PAGE_SHIFT-10),
43085 hiwater_rss << (PAGE_SHIFT-10),
43086@@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct
43087 data << (PAGE_SHIFT-10),
43088 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
43089 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
43090- swap << (PAGE_SHIFT-10));
43091+ swap << (PAGE_SHIFT-10)
43092+
43093+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
43094+ , mm->context.user_cs_base, mm->context.user_cs_limit
43095+#endif
43096+
43097+ );
43098 }
43099
43100 unsigned long task_vsize(struct mm_struct *mm)
43101@@ -207,6 +218,12 @@ static int do_maps_open(struct inode *in
43102 return ret;
43103 }
43104
43105+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43106+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
43107+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
43108+ _mm->pax_flags & MF_PAX_SEGMEXEC))
43109+#endif
43110+
43111 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
43112 {
43113 struct mm_struct *mm = vma->vm_mm;
43114@@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file
43115 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
43116 }
43117
43118- /* We don't show the stack guard page in /proc/maps */
43119+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43120+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
43121+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
43122+#else
43123 start = vma->vm_start;
43124- if (stack_guard_page_start(vma, start))
43125- start += PAGE_SIZE;
43126 end = vma->vm_end;
43127- if (stack_guard_page_end(vma, end))
43128- end -= PAGE_SIZE;
43129+#endif
43130
43131 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
43132 start,
43133@@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file
43134 flags & VM_WRITE ? 'w' : '-',
43135 flags & VM_EXEC ? 'x' : '-',
43136 flags & VM_MAYSHARE ? 's' : 'p',
43137+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43138+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
43139+#else
43140 pgoff,
43141+#endif
43142 MAJOR(dev), MINOR(dev), ino, &len);
43143
43144 /*
43145@@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file
43146 */
43147 if (file) {
43148 pad_len_spaces(m, len);
43149- seq_path(m, &file->f_path, "\n");
43150+ seq_path(m, &file->f_path, "\n\\");
43151 } else {
43152 const char *name = arch_vma_name(vma);
43153 if (!name) {
43154@@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file
43155 if (vma->vm_start <= mm->brk &&
43156 vma->vm_end >= mm->start_brk) {
43157 name = "[heap]";
43158- } else if (vma->vm_start <= mm->start_stack &&
43159- vma->vm_end >= mm->start_stack) {
43160+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
43161+ (vma->vm_start <= mm->start_stack &&
43162+ vma->vm_end >= mm->start_stack)) {
43163 name = "[stack]";
43164 }
43165 } else {
43166@@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m,
43167 };
43168
43169 memset(&mss, 0, sizeof mss);
43170- mss.vma = vma;
43171- /* mmap_sem is held in m_start */
43172- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
43173- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
43174-
43175+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43176+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
43177+#endif
43178+ mss.vma = vma;
43179+ /* mmap_sem is held in m_start */
43180+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
43181+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
43182+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43183+ }
43184+#endif
43185 show_map_vma(m, vma);
43186
43187 seq_printf(m,
43188@@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m,
43189 "KernelPageSize: %8lu kB\n"
43190 "MMUPageSize: %8lu kB\n"
43191 "Locked: %8lu kB\n",
43192+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43193+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
43194+#else
43195 (vma->vm_end - vma->vm_start) >> 10,
43196+#endif
43197 mss.resident >> 10,
43198 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
43199 mss.shared_clean >> 10,
43200@@ -1001,7 +1032,7 @@ static int show_numa_map(struct seq_file
43201
43202 if (file) {
43203 seq_printf(m, " file=");
43204- seq_path(m, &file->f_path, "\n\t= ");
43205+ seq_path(m, &file->f_path, "\n\t\\= ");
43206 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
43207 seq_printf(m, " heap");
43208 } else if (vma->vm_start <= mm->start_stack &&
43209diff -urNp linux-3.0.4/fs/proc/task_nommu.c linux-3.0.4/fs/proc/task_nommu.c
43210--- linux-3.0.4/fs/proc/task_nommu.c 2011-07-21 22:17:23.000000000 -0400
43211+++ linux-3.0.4/fs/proc/task_nommu.c 2011-08-23 21:47:56.000000000 -0400
43212@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
43213 else
43214 bytes += kobjsize(mm);
43215
43216- if (current->fs && current->fs->users > 1)
43217+ if (current->fs && atomic_read(&current->fs->users) > 1)
43218 sbytes += kobjsize(current->fs);
43219 else
43220 bytes += kobjsize(current->fs);
43221@@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_fil
43222
43223 if (file) {
43224 pad_len_spaces(m, len);
43225- seq_path(m, &file->f_path, "");
43226+ seq_path(m, &file->f_path, "\n\\");
43227 } else if (mm) {
43228 if (vma->vm_start <= mm->start_stack &&
43229 vma->vm_end >= mm->start_stack) {
43230diff -urNp linux-3.0.4/fs/quota/netlink.c linux-3.0.4/fs/quota/netlink.c
43231--- linux-3.0.4/fs/quota/netlink.c 2011-07-21 22:17:23.000000000 -0400
43232+++ linux-3.0.4/fs/quota/netlink.c 2011-08-23 21:47:56.000000000 -0400
43233@@ -33,7 +33,7 @@ static struct genl_family quota_genl_fam
43234 void quota_send_warning(short type, unsigned int id, dev_t dev,
43235 const char warntype)
43236 {
43237- static atomic_t seq;
43238+ static atomic_unchecked_t seq;
43239 struct sk_buff *skb;
43240 void *msg_head;
43241 int ret;
43242@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsi
43243 "VFS: Not enough memory to send quota warning.\n");
43244 return;
43245 }
43246- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
43247+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
43248 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
43249 if (!msg_head) {
43250 printk(KERN_ERR
43251diff -urNp linux-3.0.4/fs/readdir.c linux-3.0.4/fs/readdir.c
43252--- linux-3.0.4/fs/readdir.c 2011-07-21 22:17:23.000000000 -0400
43253+++ linux-3.0.4/fs/readdir.c 2011-08-23 21:48:14.000000000 -0400
43254@@ -17,6 +17,7 @@
43255 #include <linux/security.h>
43256 #include <linux/syscalls.h>
43257 #include <linux/unistd.h>
43258+#include <linux/namei.h>
43259
43260 #include <asm/uaccess.h>
43261
43262@@ -67,6 +68,7 @@ struct old_linux_dirent {
43263
43264 struct readdir_callback {
43265 struct old_linux_dirent __user * dirent;
43266+ struct file * file;
43267 int result;
43268 };
43269
43270@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
43271 buf->result = -EOVERFLOW;
43272 return -EOVERFLOW;
43273 }
43274+
43275+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43276+ return 0;
43277+
43278 buf->result++;
43279 dirent = buf->dirent;
43280 if (!access_ok(VERIFY_WRITE, dirent,
43281@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
43282
43283 buf.result = 0;
43284 buf.dirent = dirent;
43285+ buf.file = file;
43286
43287 error = vfs_readdir(file, fillonedir, &buf);
43288 if (buf.result)
43289@@ -142,6 +149,7 @@ struct linux_dirent {
43290 struct getdents_callback {
43291 struct linux_dirent __user * current_dir;
43292 struct linux_dirent __user * previous;
43293+ struct file * file;
43294 int count;
43295 int error;
43296 };
43297@@ -163,6 +171,10 @@ static int filldir(void * __buf, const c
43298 buf->error = -EOVERFLOW;
43299 return -EOVERFLOW;
43300 }
43301+
43302+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43303+ return 0;
43304+
43305 dirent = buf->previous;
43306 if (dirent) {
43307 if (__put_user(offset, &dirent->d_off))
43308@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
43309 buf.previous = NULL;
43310 buf.count = count;
43311 buf.error = 0;
43312+ buf.file = file;
43313
43314 error = vfs_readdir(file, filldir, &buf);
43315 if (error >= 0)
43316@@ -229,6 +242,7 @@ out:
43317 struct getdents_callback64 {
43318 struct linux_dirent64 __user * current_dir;
43319 struct linux_dirent64 __user * previous;
43320+ struct file *file;
43321 int count;
43322 int error;
43323 };
43324@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const
43325 buf->error = -EINVAL; /* only used if we fail.. */
43326 if (reclen > buf->count)
43327 return -EINVAL;
43328+
43329+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43330+ return 0;
43331+
43332 dirent = buf->previous;
43333 if (dirent) {
43334 if (__put_user(offset, &dirent->d_off))
43335@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
43336
43337 buf.current_dir = dirent;
43338 buf.previous = NULL;
43339+ buf.file = file;
43340 buf.count = count;
43341 buf.error = 0;
43342
43343diff -urNp linux-3.0.4/fs/reiserfs/dir.c linux-3.0.4/fs/reiserfs/dir.c
43344--- linux-3.0.4/fs/reiserfs/dir.c 2011-07-21 22:17:23.000000000 -0400
43345+++ linux-3.0.4/fs/reiserfs/dir.c 2011-08-23 21:48:14.000000000 -0400
43346@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
43347 struct reiserfs_dir_entry de;
43348 int ret = 0;
43349
43350+ pax_track_stack();
43351+
43352 reiserfs_write_lock(inode->i_sb);
43353
43354 reiserfs_check_lock_depth(inode->i_sb, "readdir");
43355diff -urNp linux-3.0.4/fs/reiserfs/do_balan.c linux-3.0.4/fs/reiserfs/do_balan.c
43356--- linux-3.0.4/fs/reiserfs/do_balan.c 2011-07-21 22:17:23.000000000 -0400
43357+++ linux-3.0.4/fs/reiserfs/do_balan.c 2011-08-23 21:47:56.000000000 -0400
43358@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
43359 return;
43360 }
43361
43362- atomic_inc(&(fs_generation(tb->tb_sb)));
43363+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
43364 do_balance_starts(tb);
43365
43366 /* balance leaf returns 0 except if combining L R and S into
43367diff -urNp linux-3.0.4/fs/reiserfs/journal.c linux-3.0.4/fs/reiserfs/journal.c
43368--- linux-3.0.4/fs/reiserfs/journal.c 2011-07-21 22:17:23.000000000 -0400
43369+++ linux-3.0.4/fs/reiserfs/journal.c 2011-08-23 21:48:14.000000000 -0400
43370@@ -2299,6 +2299,8 @@ static struct buffer_head *reiserfs_brea
43371 struct buffer_head *bh;
43372 int i, j;
43373
43374+ pax_track_stack();
43375+
43376 bh = __getblk(dev, block, bufsize);
43377 if (buffer_uptodate(bh))
43378 return (bh);
43379diff -urNp linux-3.0.4/fs/reiserfs/namei.c linux-3.0.4/fs/reiserfs/namei.c
43380--- linux-3.0.4/fs/reiserfs/namei.c 2011-07-21 22:17:23.000000000 -0400
43381+++ linux-3.0.4/fs/reiserfs/namei.c 2011-08-23 21:48:14.000000000 -0400
43382@@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode
43383 unsigned long savelink = 1;
43384 struct timespec ctime;
43385
43386+ pax_track_stack();
43387+
43388 /* three balancings: (1) old name removal, (2) new name insertion
43389 and (3) maybe "save" link insertion
43390 stat data updates: (1) old directory,
43391diff -urNp linux-3.0.4/fs/reiserfs/procfs.c linux-3.0.4/fs/reiserfs/procfs.c
43392--- linux-3.0.4/fs/reiserfs/procfs.c 2011-07-21 22:17:23.000000000 -0400
43393+++ linux-3.0.4/fs/reiserfs/procfs.c 2011-08-23 21:48:14.000000000 -0400
43394@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m
43395 "SMALL_TAILS " : "NO_TAILS ",
43396 replay_only(sb) ? "REPLAY_ONLY " : "",
43397 convert_reiserfs(sb) ? "CONV " : "",
43398- atomic_read(&r->s_generation_counter),
43399+ atomic_read_unchecked(&r->s_generation_counter),
43400 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
43401 SF(s_do_balance), SF(s_unneeded_left_neighbor),
43402 SF(s_good_search_by_key_reada), SF(s_bmaps),
43403@@ -299,6 +299,8 @@ static int show_journal(struct seq_file
43404 struct journal_params *jp = &rs->s_v1.s_journal;
43405 char b[BDEVNAME_SIZE];
43406
43407+ pax_track_stack();
43408+
43409 seq_printf(m, /* on-disk fields */
43410 "jp_journal_1st_block: \t%i\n"
43411 "jp_journal_dev: \t%s[%x]\n"
43412diff -urNp linux-3.0.4/fs/reiserfs/stree.c linux-3.0.4/fs/reiserfs/stree.c
43413--- linux-3.0.4/fs/reiserfs/stree.c 2011-07-21 22:17:23.000000000 -0400
43414+++ linux-3.0.4/fs/reiserfs/stree.c 2011-08-23 21:48:14.000000000 -0400
43415@@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs
43416 int iter = 0;
43417 #endif
43418
43419+ pax_track_stack();
43420+
43421 BUG_ON(!th->t_trans_id);
43422
43423 init_tb_struct(th, &s_del_balance, sb, path,
43424@@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct r
43425 int retval;
43426 int quota_cut_bytes = 0;
43427
43428+ pax_track_stack();
43429+
43430 BUG_ON(!th->t_trans_id);
43431
43432 le_key2cpu_key(&cpu_key, key);
43433@@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiser
43434 int quota_cut_bytes;
43435 loff_t tail_pos = 0;
43436
43437+ pax_track_stack();
43438+
43439 BUG_ON(!th->t_trans_id);
43440
43441 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
43442@@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reis
43443 int retval;
43444 int fs_gen;
43445
43446+ pax_track_stack();
43447+
43448 BUG_ON(!th->t_trans_id);
43449
43450 fs_gen = get_generation(inode->i_sb);
43451@@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs
43452 int fs_gen = 0;
43453 int quota_bytes = 0;
43454
43455+ pax_track_stack();
43456+
43457 BUG_ON(!th->t_trans_id);
43458
43459 if (inode) { /* Do we count quotas for item? */
43460diff -urNp linux-3.0.4/fs/reiserfs/super.c linux-3.0.4/fs/reiserfs/super.c
43461--- linux-3.0.4/fs/reiserfs/super.c 2011-07-21 22:17:23.000000000 -0400
43462+++ linux-3.0.4/fs/reiserfs/super.c 2011-08-23 21:48:14.000000000 -0400
43463@@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct
43464 {.option_name = NULL}
43465 };
43466
43467+ pax_track_stack();
43468+
43469 *blocks = 0;
43470 if (!options || !*options)
43471 /* use default configuration: create tails, journaling on, no
43472diff -urNp linux-3.0.4/fs/select.c linux-3.0.4/fs/select.c
43473--- linux-3.0.4/fs/select.c 2011-07-21 22:17:23.000000000 -0400
43474+++ linux-3.0.4/fs/select.c 2011-08-23 21:48:14.000000000 -0400
43475@@ -20,6 +20,7 @@
43476 #include <linux/module.h>
43477 #include <linux/slab.h>
43478 #include <linux/poll.h>
43479+#include <linux/security.h>
43480 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
43481 #include <linux/file.h>
43482 #include <linux/fdtable.h>
43483@@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, s
43484 int retval, i, timed_out = 0;
43485 unsigned long slack = 0;
43486
43487+ pax_track_stack();
43488+
43489 rcu_read_lock();
43490 retval = max_select_fd(n, fds);
43491 rcu_read_unlock();
43492@@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user
43493 /* Allocate small arguments on the stack to save memory and be faster */
43494 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
43495
43496+ pax_track_stack();
43497+
43498 ret = -EINVAL;
43499 if (n < 0)
43500 goto out_nofds;
43501@@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *uf
43502 struct poll_list *walk = head;
43503 unsigned long todo = nfds;
43504
43505+ pax_track_stack();
43506+
43507+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
43508 if (nfds > rlimit(RLIMIT_NOFILE))
43509 return -EINVAL;
43510
43511diff -urNp linux-3.0.4/fs/seq_file.c linux-3.0.4/fs/seq_file.c
43512--- linux-3.0.4/fs/seq_file.c 2011-07-21 22:17:23.000000000 -0400
43513+++ linux-3.0.4/fs/seq_file.c 2011-08-23 21:47:56.000000000 -0400
43514@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
43515 return 0;
43516 }
43517 if (!m->buf) {
43518- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
43519+ m->size = PAGE_SIZE;
43520+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
43521 if (!m->buf)
43522 return -ENOMEM;
43523 }
43524@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
43525 Eoverflow:
43526 m->op->stop(m, p);
43527 kfree(m->buf);
43528- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
43529+ m->size <<= 1;
43530+ m->buf = kmalloc(m->size, GFP_KERNEL);
43531 return !m->buf ? -ENOMEM : -EAGAIN;
43532 }
43533
43534@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
43535 m->version = file->f_version;
43536 /* grab buffer if we didn't have one */
43537 if (!m->buf) {
43538- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
43539+ m->size = PAGE_SIZE;
43540+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
43541 if (!m->buf)
43542 goto Enomem;
43543 }
43544@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
43545 goto Fill;
43546 m->op->stop(m, p);
43547 kfree(m->buf);
43548- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
43549+ m->size <<= 1;
43550+ m->buf = kmalloc(m->size, GFP_KERNEL);
43551 if (!m->buf)
43552 goto Enomem;
43553 m->count = 0;
43554@@ -549,7 +553,7 @@ static void single_stop(struct seq_file
43555 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
43556 void *data)
43557 {
43558- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
43559+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
43560 int res = -ENOMEM;
43561
43562 if (op) {
43563diff -urNp linux-3.0.4/fs/splice.c linux-3.0.4/fs/splice.c
43564--- linux-3.0.4/fs/splice.c 2011-07-21 22:17:23.000000000 -0400
43565+++ linux-3.0.4/fs/splice.c 2011-08-23 21:48:14.000000000 -0400
43566@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode
43567 pipe_lock(pipe);
43568
43569 for (;;) {
43570- if (!pipe->readers) {
43571+ if (!atomic_read(&pipe->readers)) {
43572 send_sig(SIGPIPE, current, 0);
43573 if (!ret)
43574 ret = -EPIPE;
43575@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode
43576 do_wakeup = 0;
43577 }
43578
43579- pipe->waiting_writers++;
43580+ atomic_inc(&pipe->waiting_writers);
43581 pipe_wait(pipe);
43582- pipe->waiting_writers--;
43583+ atomic_dec(&pipe->waiting_writers);
43584 }
43585
43586 pipe_unlock(pipe);
43587@@ -320,6 +320,8 @@ __generic_file_splice_read(struct file *
43588 .spd_release = spd_release_page,
43589 };
43590
43591+ pax_track_stack();
43592+
43593 if (splice_grow_spd(pipe, &spd))
43594 return -ENOMEM;
43595
43596@@ -560,7 +562,7 @@ static ssize_t kernel_readv(struct file
43597 old_fs = get_fs();
43598 set_fs(get_ds());
43599 /* The cast to a user pointer is valid due to the set_fs() */
43600- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
43601+ res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
43602 set_fs(old_fs);
43603
43604 return res;
43605@@ -575,7 +577,7 @@ static ssize_t kernel_write(struct file
43606 old_fs = get_fs();
43607 set_fs(get_ds());
43608 /* The cast to a user pointer is valid due to the set_fs() */
43609- res = vfs_write(file, (const char __user *)buf, count, &pos);
43610+ res = vfs_write(file, (__force const char __user *)buf, count, &pos);
43611 set_fs(old_fs);
43612
43613 return res;
43614@@ -603,6 +605,8 @@ ssize_t default_file_splice_read(struct
43615 .spd_release = spd_release_page,
43616 };
43617
43618+ pax_track_stack();
43619+
43620 if (splice_grow_spd(pipe, &spd))
43621 return -ENOMEM;
43622
43623@@ -626,7 +630,7 @@ ssize_t default_file_splice_read(struct
43624 goto err;
43625
43626 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
43627- vec[i].iov_base = (void __user *) page_address(page);
43628+ vec[i].iov_base = (__force void __user *) page_address(page);
43629 vec[i].iov_len = this_len;
43630 spd.pages[i] = page;
43631 spd.nr_pages++;
43632@@ -846,10 +850,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
43633 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
43634 {
43635 while (!pipe->nrbufs) {
43636- if (!pipe->writers)
43637+ if (!atomic_read(&pipe->writers))
43638 return 0;
43639
43640- if (!pipe->waiting_writers && sd->num_spliced)
43641+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
43642 return 0;
43643
43644 if (sd->flags & SPLICE_F_NONBLOCK)
43645@@ -1182,7 +1186,7 @@ ssize_t splice_direct_to_actor(struct fi
43646 * out of the pipe right after the splice_to_pipe(). So set
43647 * PIPE_READERS appropriately.
43648 */
43649- pipe->readers = 1;
43650+ atomic_set(&pipe->readers, 1);
43651
43652 current->splice_pipe = pipe;
43653 }
43654@@ -1619,6 +1623,8 @@ static long vmsplice_to_pipe(struct file
43655 };
43656 long ret;
43657
43658+ pax_track_stack();
43659+
43660 pipe = get_pipe_info(file);
43661 if (!pipe)
43662 return -EBADF;
43663@@ -1734,9 +1740,9 @@ static int ipipe_prep(struct pipe_inode_
43664 ret = -ERESTARTSYS;
43665 break;
43666 }
43667- if (!pipe->writers)
43668+ if (!atomic_read(&pipe->writers))
43669 break;
43670- if (!pipe->waiting_writers) {
43671+ if (!atomic_read(&pipe->waiting_writers)) {
43672 if (flags & SPLICE_F_NONBLOCK) {
43673 ret = -EAGAIN;
43674 break;
43675@@ -1768,7 +1774,7 @@ static int opipe_prep(struct pipe_inode_
43676 pipe_lock(pipe);
43677
43678 while (pipe->nrbufs >= pipe->buffers) {
43679- if (!pipe->readers) {
43680+ if (!atomic_read(&pipe->readers)) {
43681 send_sig(SIGPIPE, current, 0);
43682 ret = -EPIPE;
43683 break;
43684@@ -1781,9 +1787,9 @@ static int opipe_prep(struct pipe_inode_
43685 ret = -ERESTARTSYS;
43686 break;
43687 }
43688- pipe->waiting_writers++;
43689+ atomic_inc(&pipe->waiting_writers);
43690 pipe_wait(pipe);
43691- pipe->waiting_writers--;
43692+ atomic_dec(&pipe->waiting_writers);
43693 }
43694
43695 pipe_unlock(pipe);
43696@@ -1819,14 +1825,14 @@ retry:
43697 pipe_double_lock(ipipe, opipe);
43698
43699 do {
43700- if (!opipe->readers) {
43701+ if (!atomic_read(&opipe->readers)) {
43702 send_sig(SIGPIPE, current, 0);
43703 if (!ret)
43704 ret = -EPIPE;
43705 break;
43706 }
43707
43708- if (!ipipe->nrbufs && !ipipe->writers)
43709+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
43710 break;
43711
43712 /*
43713@@ -1923,7 +1929,7 @@ static int link_pipe(struct pipe_inode_i
43714 pipe_double_lock(ipipe, opipe);
43715
43716 do {
43717- if (!opipe->readers) {
43718+ if (!atomic_read(&opipe->readers)) {
43719 send_sig(SIGPIPE, current, 0);
43720 if (!ret)
43721 ret = -EPIPE;
43722@@ -1968,7 +1974,7 @@ static int link_pipe(struct pipe_inode_i
43723 * return EAGAIN if we have the potential of some data in the
43724 * future, otherwise just return 0
43725 */
43726- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
43727+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
43728 ret = -EAGAIN;
43729
43730 pipe_unlock(ipipe);
43731diff -urNp linux-3.0.4/fs/sysfs/file.c linux-3.0.4/fs/sysfs/file.c
43732--- linux-3.0.4/fs/sysfs/file.c 2011-07-21 22:17:23.000000000 -0400
43733+++ linux-3.0.4/fs/sysfs/file.c 2011-08-23 21:47:56.000000000 -0400
43734@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
43735
43736 struct sysfs_open_dirent {
43737 atomic_t refcnt;
43738- atomic_t event;
43739+ atomic_unchecked_t event;
43740 wait_queue_head_t poll;
43741 struct list_head buffers; /* goes through sysfs_buffer.list */
43742 };
43743@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentr
43744 if (!sysfs_get_active(attr_sd))
43745 return -ENODEV;
43746
43747- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
43748+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
43749 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
43750
43751 sysfs_put_active(attr_sd);
43752@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct
43753 return -ENOMEM;
43754
43755 atomic_set(&new_od->refcnt, 0);
43756- atomic_set(&new_od->event, 1);
43757+ atomic_set_unchecked(&new_od->event, 1);
43758 init_waitqueue_head(&new_od->poll);
43759 INIT_LIST_HEAD(&new_od->buffers);
43760 goto retry;
43761@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct fi
43762
43763 sysfs_put_active(attr_sd);
43764
43765- if (buffer->event != atomic_read(&od->event))
43766+ if (buffer->event != atomic_read_unchecked(&od->event))
43767 goto trigger;
43768
43769 return DEFAULT_POLLMASK;
43770@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_di
43771
43772 od = sd->s_attr.open;
43773 if (od) {
43774- atomic_inc(&od->event);
43775+ atomic_inc_unchecked(&od->event);
43776 wake_up_interruptible(&od->poll);
43777 }
43778
43779diff -urNp linux-3.0.4/fs/sysfs/mount.c linux-3.0.4/fs/sysfs/mount.c
43780--- linux-3.0.4/fs/sysfs/mount.c 2011-07-21 22:17:23.000000000 -0400
43781+++ linux-3.0.4/fs/sysfs/mount.c 2011-08-23 21:48:14.000000000 -0400
43782@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
43783 .s_name = "",
43784 .s_count = ATOMIC_INIT(1),
43785 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
43786+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
43787+ .s_mode = S_IFDIR | S_IRWXU,
43788+#else
43789 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
43790+#endif
43791 .s_ino = 1,
43792 };
43793
43794diff -urNp linux-3.0.4/fs/sysfs/symlink.c linux-3.0.4/fs/sysfs/symlink.c
43795--- linux-3.0.4/fs/sysfs/symlink.c 2011-07-21 22:17:23.000000000 -0400
43796+++ linux-3.0.4/fs/sysfs/symlink.c 2011-08-23 21:47:56.000000000 -0400
43797@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct de
43798
43799 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
43800 {
43801- char *page = nd_get_link(nd);
43802+ const char *page = nd_get_link(nd);
43803 if (!IS_ERR(page))
43804 free_page((unsigned long)page);
43805 }
43806diff -urNp linux-3.0.4/fs/udf/inode.c linux-3.0.4/fs/udf/inode.c
43807--- linux-3.0.4/fs/udf/inode.c 2011-07-21 22:17:23.000000000 -0400
43808+++ linux-3.0.4/fs/udf/inode.c 2011-08-23 21:48:14.000000000 -0400
43809@@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(
43810 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
43811 int lastblock = 0;
43812
43813+ pax_track_stack();
43814+
43815 prev_epos.offset = udf_file_entry_alloc_offset(inode);
43816 prev_epos.block = iinfo->i_location;
43817 prev_epos.bh = NULL;
43818diff -urNp linux-3.0.4/fs/udf/misc.c linux-3.0.4/fs/udf/misc.c
43819--- linux-3.0.4/fs/udf/misc.c 2011-07-21 22:17:23.000000000 -0400
43820+++ linux-3.0.4/fs/udf/misc.c 2011-08-23 21:47:56.000000000 -0400
43821@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
43822
43823 u8 udf_tag_checksum(const struct tag *t)
43824 {
43825- u8 *data = (u8 *)t;
43826+ const u8 *data = (const u8 *)t;
43827 u8 checksum = 0;
43828 int i;
43829 for (i = 0; i < sizeof(struct tag); ++i)
43830diff -urNp linux-3.0.4/fs/utimes.c linux-3.0.4/fs/utimes.c
43831--- linux-3.0.4/fs/utimes.c 2011-07-21 22:17:23.000000000 -0400
43832+++ linux-3.0.4/fs/utimes.c 2011-08-23 21:48:14.000000000 -0400
43833@@ -1,6 +1,7 @@
43834 #include <linux/compiler.h>
43835 #include <linux/file.h>
43836 #include <linux/fs.h>
43837+#include <linux/security.h>
43838 #include <linux/linkage.h>
43839 #include <linux/mount.h>
43840 #include <linux/namei.h>
43841@@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
43842 goto mnt_drop_write_and_out;
43843 }
43844 }
43845+
43846+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
43847+ error = -EACCES;
43848+ goto mnt_drop_write_and_out;
43849+ }
43850+
43851 mutex_lock(&inode->i_mutex);
43852 error = notify_change(path->dentry, &newattrs);
43853 mutex_unlock(&inode->i_mutex);
43854diff -urNp linux-3.0.4/fs/xattr_acl.c linux-3.0.4/fs/xattr_acl.c
43855--- linux-3.0.4/fs/xattr_acl.c 2011-07-21 22:17:23.000000000 -0400
43856+++ linux-3.0.4/fs/xattr_acl.c 2011-08-23 21:47:56.000000000 -0400
43857@@ -17,8 +17,8 @@
43858 struct posix_acl *
43859 posix_acl_from_xattr(const void *value, size_t size)
43860 {
43861- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
43862- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
43863+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
43864+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
43865 int count;
43866 struct posix_acl *acl;
43867 struct posix_acl_entry *acl_e;
43868diff -urNp linux-3.0.4/fs/xattr.c linux-3.0.4/fs/xattr.c
43869--- linux-3.0.4/fs/xattr.c 2011-07-21 22:17:23.000000000 -0400
43870+++ linux-3.0.4/fs/xattr.c 2011-08-23 21:48:14.000000000 -0400
43871@@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
43872 * Extended attribute SET operations
43873 */
43874 static long
43875-setxattr(struct dentry *d, const char __user *name, const void __user *value,
43876+setxattr(struct path *path, const char __user *name, const void __user *value,
43877 size_t size, int flags)
43878 {
43879 int error;
43880@@ -278,7 +278,13 @@ setxattr(struct dentry *d, const char __
43881 return PTR_ERR(kvalue);
43882 }
43883
43884- error = vfs_setxattr(d, kname, kvalue, size, flags);
43885+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
43886+ error = -EACCES;
43887+ goto out;
43888+ }
43889+
43890+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
43891+out:
43892 kfree(kvalue);
43893 return error;
43894 }
43895@@ -295,7 +301,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
43896 return error;
43897 error = mnt_want_write(path.mnt);
43898 if (!error) {
43899- error = setxattr(path.dentry, name, value, size, flags);
43900+ error = setxattr(&path, name, value, size, flags);
43901 mnt_drop_write(path.mnt);
43902 }
43903 path_put(&path);
43904@@ -314,7 +320,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
43905 return error;
43906 error = mnt_want_write(path.mnt);
43907 if (!error) {
43908- error = setxattr(path.dentry, name, value, size, flags);
43909+ error = setxattr(&path, name, value, size, flags);
43910 mnt_drop_write(path.mnt);
43911 }
43912 path_put(&path);
43913@@ -325,17 +331,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
43914 const void __user *,value, size_t, size, int, flags)
43915 {
43916 struct file *f;
43917- struct dentry *dentry;
43918 int error = -EBADF;
43919
43920 f = fget(fd);
43921 if (!f)
43922 return error;
43923- dentry = f->f_path.dentry;
43924- audit_inode(NULL, dentry);
43925+ audit_inode(NULL, f->f_path.dentry);
43926 error = mnt_want_write_file(f);
43927 if (!error) {
43928- error = setxattr(dentry, name, value, size, flags);
43929+ error = setxattr(&f->f_path, name, value, size, flags);
43930 mnt_drop_write(f->f_path.mnt);
43931 }
43932 fput(f);
43933diff -urNp linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c
43934--- linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-07-21 22:17:23.000000000 -0400
43935+++ linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-08-23 21:48:14.000000000 -0400
43936@@ -73,6 +73,7 @@ xfs_compat_ioc_fsgeometry_v1(
43937 xfs_fsop_geom_t fsgeo;
43938 int error;
43939
43940+ memset(&fsgeo, 0, sizeof(fsgeo));
43941 error = xfs_fs_geometry(mp, &fsgeo, 3);
43942 if (error)
43943 return -error;
43944diff -urNp linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c
43945--- linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c 2011-07-21 22:17:23.000000000 -0400
43946+++ linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c 2011-08-23 21:47:56.000000000 -0400
43947@@ -128,7 +128,7 @@ xfs_find_handle(
43948 }
43949
43950 error = -EFAULT;
43951- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
43952+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
43953 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
43954 goto out_put;
43955
43956diff -urNp linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c
43957--- linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c 2011-07-21 22:17:23.000000000 -0400
43958+++ linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c 2011-08-23 21:47:56.000000000 -0400
43959@@ -437,7 +437,7 @@ xfs_vn_put_link(
43960 struct nameidata *nd,
43961 void *p)
43962 {
43963- char *s = nd_get_link(nd);
43964+ const char *s = nd_get_link(nd);
43965
43966 if (!IS_ERR(s))
43967 kfree(s);
43968diff -urNp linux-3.0.4/fs/xfs/xfs_bmap.c linux-3.0.4/fs/xfs/xfs_bmap.c
43969--- linux-3.0.4/fs/xfs/xfs_bmap.c 2011-07-21 22:17:23.000000000 -0400
43970+++ linux-3.0.4/fs/xfs/xfs_bmap.c 2011-08-23 21:47:56.000000000 -0400
43971@@ -253,7 +253,7 @@ xfs_bmap_validate_ret(
43972 int nmap,
43973 int ret_nmap);
43974 #else
43975-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
43976+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
43977 #endif /* DEBUG */
43978
43979 STATIC int
43980diff -urNp linux-3.0.4/fs/xfs/xfs_dir2_sf.c linux-3.0.4/fs/xfs/xfs_dir2_sf.c
43981--- linux-3.0.4/fs/xfs/xfs_dir2_sf.c 2011-07-21 22:17:23.000000000 -0400
43982+++ linux-3.0.4/fs/xfs/xfs_dir2_sf.c 2011-08-23 21:47:56.000000000 -0400
43983@@ -780,7 +780,15 @@ xfs_dir2_sf_getdents(
43984 }
43985
43986 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
43987- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
43988+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
43989+ char name[sfep->namelen];
43990+ memcpy(name, sfep->name, sfep->namelen);
43991+ if (filldir(dirent, name, sfep->namelen,
43992+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
43993+ *offset = off & 0x7fffffff;
43994+ return 0;
43995+ }
43996+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
43997 off & 0x7fffffff, ino, DT_UNKNOWN)) {
43998 *offset = off & 0x7fffffff;
43999 return 0;
44000diff -urNp linux-3.0.4/grsecurity/gracl_alloc.c linux-3.0.4/grsecurity/gracl_alloc.c
44001--- linux-3.0.4/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
44002+++ linux-3.0.4/grsecurity/gracl_alloc.c 2011-08-23 21:48:14.000000000 -0400
44003@@ -0,0 +1,105 @@
44004+#include <linux/kernel.h>
44005+#include <linux/mm.h>
44006+#include <linux/slab.h>
44007+#include <linux/vmalloc.h>
44008+#include <linux/gracl.h>
44009+#include <linux/grsecurity.h>
44010+
44011+static unsigned long alloc_stack_next = 1;
44012+static unsigned long alloc_stack_size = 1;
44013+static void **alloc_stack;
44014+
44015+static __inline__ int
44016+alloc_pop(void)
44017+{
44018+ if (alloc_stack_next == 1)
44019+ return 0;
44020+
44021+ kfree(alloc_stack[alloc_stack_next - 2]);
44022+
44023+ alloc_stack_next--;
44024+
44025+ return 1;
44026+}
44027+
44028+static __inline__ int
44029+alloc_push(void *buf)
44030+{
44031+ if (alloc_stack_next >= alloc_stack_size)
44032+ return 1;
44033+
44034+ alloc_stack[alloc_stack_next - 1] = buf;
44035+
44036+ alloc_stack_next++;
44037+
44038+ return 0;
44039+}
44040+
44041+void *
44042+acl_alloc(unsigned long len)
44043+{
44044+ void *ret = NULL;
44045+
44046+ if (!len || len > PAGE_SIZE)
44047+ goto out;
44048+
44049+ ret = kmalloc(len, GFP_KERNEL);
44050+
44051+ if (ret) {
44052+ if (alloc_push(ret)) {
44053+ kfree(ret);
44054+ ret = NULL;
44055+ }
44056+ }
44057+
44058+out:
44059+ return ret;
44060+}
44061+
44062+void *
44063+acl_alloc_num(unsigned long num, unsigned long len)
44064+{
44065+ if (!len || (num > (PAGE_SIZE / len)))
44066+ return NULL;
44067+
44068+ return acl_alloc(num * len);
44069+}
44070+
44071+void
44072+acl_free_all(void)
44073+{
44074+ if (gr_acl_is_enabled() || !alloc_stack)
44075+ return;
44076+
44077+ while (alloc_pop()) ;
44078+
44079+ if (alloc_stack) {
44080+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
44081+ kfree(alloc_stack);
44082+ else
44083+ vfree(alloc_stack);
44084+ }
44085+
44086+ alloc_stack = NULL;
44087+ alloc_stack_size = 1;
44088+ alloc_stack_next = 1;
44089+
44090+ return;
44091+}
44092+
44093+int
44094+acl_alloc_stack_init(unsigned long size)
44095+{
44096+ if ((size * sizeof (void *)) <= PAGE_SIZE)
44097+ alloc_stack =
44098+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
44099+ else
44100+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
44101+
44102+ alloc_stack_size = size;
44103+
44104+ if (!alloc_stack)
44105+ return 0;
44106+ else
44107+ return 1;
44108+}
44109diff -urNp linux-3.0.4/grsecurity/gracl.c linux-3.0.4/grsecurity/gracl.c
44110--- linux-3.0.4/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
44111+++ linux-3.0.4/grsecurity/gracl.c 2011-08-23 21:48:14.000000000 -0400
44112@@ -0,0 +1,4106 @@
44113+#include <linux/kernel.h>
44114+#include <linux/module.h>
44115+#include <linux/sched.h>
44116+#include <linux/mm.h>
44117+#include <linux/file.h>
44118+#include <linux/fs.h>
44119+#include <linux/namei.h>
44120+#include <linux/mount.h>
44121+#include <linux/tty.h>
44122+#include <linux/proc_fs.h>
44123+#include <linux/lglock.h>
44124+#include <linux/slab.h>
44125+#include <linux/vmalloc.h>
44126+#include <linux/types.h>
44127+#include <linux/sysctl.h>
44128+#include <linux/netdevice.h>
44129+#include <linux/ptrace.h>
44130+#include <linux/gracl.h>
44131+#include <linux/gralloc.h>
44132+#include <linux/grsecurity.h>
44133+#include <linux/grinternal.h>
44134+#include <linux/pid_namespace.h>
44135+#include <linux/fdtable.h>
44136+#include <linux/percpu.h>
44137+
44138+#include <asm/uaccess.h>
44139+#include <asm/errno.h>
44140+#include <asm/mman.h>
44141+
44142+static struct acl_role_db acl_role_set;
44143+static struct name_db name_set;
44144+static struct inodev_db inodev_set;
44145+
44146+/* for keeping track of userspace pointers used for subjects, so we
44147+ can share references in the kernel as well
44148+*/
44149+
44150+static struct path real_root;
44151+
44152+static struct acl_subj_map_db subj_map_set;
44153+
44154+static struct acl_role_label *default_role;
44155+
44156+static struct acl_role_label *role_list;
44157+
44158+static u16 acl_sp_role_value;
44159+
44160+extern char *gr_shared_page[4];
44161+static DEFINE_MUTEX(gr_dev_mutex);
44162+DEFINE_RWLOCK(gr_inode_lock);
44163+
44164+struct gr_arg *gr_usermode;
44165+
44166+static unsigned int gr_status __read_only = GR_STATUS_INIT;
44167+
44168+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
44169+extern void gr_clear_learn_entries(void);
44170+
44171+#ifdef CONFIG_GRKERNSEC_RESLOG
44172+extern void gr_log_resource(const struct task_struct *task,
44173+ const int res, const unsigned long wanted, const int gt);
44174+#endif
44175+
44176+unsigned char *gr_system_salt;
44177+unsigned char *gr_system_sum;
44178+
44179+static struct sprole_pw **acl_special_roles = NULL;
44180+static __u16 num_sprole_pws = 0;
44181+
44182+static struct acl_role_label *kernel_role = NULL;
44183+
44184+static unsigned int gr_auth_attempts = 0;
44185+static unsigned long gr_auth_expires = 0UL;
44186+
44187+#ifdef CONFIG_NET
44188+extern struct vfsmount *sock_mnt;
44189+#endif
44190+
44191+extern struct vfsmount *pipe_mnt;
44192+extern struct vfsmount *shm_mnt;
44193+#ifdef CONFIG_HUGETLBFS
44194+extern struct vfsmount *hugetlbfs_vfsmount;
44195+#endif
44196+
44197+static struct acl_object_label *fakefs_obj_rw;
44198+static struct acl_object_label *fakefs_obj_rwx;
44199+
44200+extern int gr_init_uidset(void);
44201+extern void gr_free_uidset(void);
44202+extern void gr_remove_uid(uid_t uid);
44203+extern int gr_find_uid(uid_t uid);
44204+
44205+DECLARE_BRLOCK(vfsmount_lock);
44206+
44207+__inline__ int
44208+gr_acl_is_enabled(void)
44209+{
44210+ return (gr_status & GR_READY);
44211+}
44212+
44213+#ifdef CONFIG_BTRFS_FS
44214+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
44215+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
44216+#endif
44217+
44218+static inline dev_t __get_dev(const struct dentry *dentry)
44219+{
44220+#ifdef CONFIG_BTRFS_FS
44221+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
44222+ return get_btrfs_dev_from_inode(dentry->d_inode);
44223+ else
44224+#endif
44225+ return dentry->d_inode->i_sb->s_dev;
44226+}
44227+
44228+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
44229+{
44230+ return __get_dev(dentry);
44231+}
44232+
44233+static char gr_task_roletype_to_char(struct task_struct *task)
44234+{
44235+ switch (task->role->roletype &
44236+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
44237+ GR_ROLE_SPECIAL)) {
44238+ case GR_ROLE_DEFAULT:
44239+ return 'D';
44240+ case GR_ROLE_USER:
44241+ return 'U';
44242+ case GR_ROLE_GROUP:
44243+ return 'G';
44244+ case GR_ROLE_SPECIAL:
44245+ return 'S';
44246+ }
44247+
44248+ return 'X';
44249+}
44250+
44251+char gr_roletype_to_char(void)
44252+{
44253+ return gr_task_roletype_to_char(current);
44254+}
44255+
44256+__inline__ int
44257+gr_acl_tpe_check(void)
44258+{
44259+ if (unlikely(!(gr_status & GR_READY)))
44260+ return 0;
44261+ if (current->role->roletype & GR_ROLE_TPE)
44262+ return 1;
44263+ else
44264+ return 0;
44265+}
44266+
44267+int
44268+gr_handle_rawio(const struct inode *inode)
44269+{
44270+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
44271+ if (inode && S_ISBLK(inode->i_mode) &&
44272+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
44273+ !capable(CAP_SYS_RAWIO))
44274+ return 1;
44275+#endif
44276+ return 0;
44277+}
44278+
44279+static int
44280+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
44281+{
44282+ if (likely(lena != lenb))
44283+ return 0;
44284+
44285+ return !memcmp(a, b, lena);
44286+}
44287+
44288+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
44289+{
44290+ *buflen -= namelen;
44291+ if (*buflen < 0)
44292+ return -ENAMETOOLONG;
44293+ *buffer -= namelen;
44294+ memcpy(*buffer, str, namelen);
44295+ return 0;
44296+}
44297+
44298+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
44299+{
44300+ return prepend(buffer, buflen, name->name, name->len);
44301+}
44302+
44303+static int prepend_path(const struct path *path, struct path *root,
44304+ char **buffer, int *buflen)
44305+{
44306+ struct dentry *dentry = path->dentry;
44307+ struct vfsmount *vfsmnt = path->mnt;
44308+ bool slash = false;
44309+ int error = 0;
44310+
44311+ while (dentry != root->dentry || vfsmnt != root->mnt) {
44312+ struct dentry * parent;
44313+
44314+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
44315+ /* Global root? */
44316+ if (vfsmnt->mnt_parent == vfsmnt) {
44317+ goto out;
44318+ }
44319+ dentry = vfsmnt->mnt_mountpoint;
44320+ vfsmnt = vfsmnt->mnt_parent;
44321+ continue;
44322+ }
44323+ parent = dentry->d_parent;
44324+ prefetch(parent);
44325+ spin_lock(&dentry->d_lock);
44326+ error = prepend_name(buffer, buflen, &dentry->d_name);
44327+ spin_unlock(&dentry->d_lock);
44328+ if (!error)
44329+ error = prepend(buffer, buflen, "/", 1);
44330+ if (error)
44331+ break;
44332+
44333+ slash = true;
44334+ dentry = parent;
44335+ }
44336+
44337+out:
44338+ if (!error && !slash)
44339+ error = prepend(buffer, buflen, "/", 1);
44340+
44341+ return error;
44342+}
44343+
44344+/* this must be called with vfsmount_lock and rename_lock held */
44345+
44346+static char *__our_d_path(const struct path *path, struct path *root,
44347+ char *buf, int buflen)
44348+{
44349+ char *res = buf + buflen;
44350+ int error;
44351+
44352+ prepend(&res, &buflen, "\0", 1);
44353+ error = prepend_path(path, root, &res, &buflen);
44354+ if (error)
44355+ return ERR_PTR(error);
44356+
44357+ return res;
44358+}
44359+
44360+static char *
44361+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
44362+{
44363+ char *retval;
44364+
44365+ retval = __our_d_path(path, root, buf, buflen);
44366+ if (unlikely(IS_ERR(retval)))
44367+ retval = strcpy(buf, "<path too long>");
44368+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
44369+ retval[1] = '\0';
44370+
44371+ return retval;
44372+}
44373+
44374+static char *
44375+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
44376+ char *buf, int buflen)
44377+{
44378+ struct path path;
44379+ char *res;
44380+
44381+ path.dentry = (struct dentry *)dentry;
44382+ path.mnt = (struct vfsmount *)vfsmnt;
44383+
44384+ /* we can use real_root.dentry, real_root.mnt, because this is only called
44385+ by the RBAC system */
44386+ res = gen_full_path(&path, &real_root, buf, buflen);
44387+
44388+ return res;
44389+}
44390+
44391+static char *
44392+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
44393+ char *buf, int buflen)
44394+{
44395+ char *res;
44396+ struct path path;
44397+ struct path root;
44398+ struct task_struct *reaper = &init_task;
44399+
44400+ path.dentry = (struct dentry *)dentry;
44401+ path.mnt = (struct vfsmount *)vfsmnt;
44402+
44403+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
44404+ get_fs_root(reaper->fs, &root);
44405+
44406+ write_seqlock(&rename_lock);
44407+ br_read_lock(vfsmount_lock);
44408+ res = gen_full_path(&path, &root, buf, buflen);
44409+ br_read_unlock(vfsmount_lock);
44410+ write_sequnlock(&rename_lock);
44411+
44412+ path_put(&root);
44413+ return res;
44414+}
44415+
44416+static char *
44417+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
44418+{
44419+ char *ret;
44420+ write_seqlock(&rename_lock);
44421+ br_read_lock(vfsmount_lock);
44422+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
44423+ PAGE_SIZE);
44424+ br_read_unlock(vfsmount_lock);
44425+ write_sequnlock(&rename_lock);
44426+ return ret;
44427+}
44428+
44429+char *
44430+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
44431+{
44432+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
44433+ PAGE_SIZE);
44434+}
44435+
44436+char *
44437+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
44438+{
44439+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
44440+ PAGE_SIZE);
44441+}
44442+
44443+char *
44444+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
44445+{
44446+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
44447+ PAGE_SIZE);
44448+}
44449+
44450+char *
44451+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
44452+{
44453+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
44454+ PAGE_SIZE);
44455+}
44456+
44457+char *
44458+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
44459+{
44460+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
44461+ PAGE_SIZE);
44462+}
44463+
44464+__inline__ __u32
44465+to_gr_audit(const __u32 reqmode)
44466+{
44467+ /* masks off auditable permission flags, then shifts them to create
44468+ auditing flags, and adds the special case of append auditing if
44469+ we're requesting write */
44470+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
44471+}
44472+
44473+struct acl_subject_label *
44474+lookup_subject_map(const struct acl_subject_label *userp)
44475+{
44476+ unsigned int index = shash(userp, subj_map_set.s_size);
44477+ struct subject_map *match;
44478+
44479+ match = subj_map_set.s_hash[index];
44480+
44481+ while (match && match->user != userp)
44482+ match = match->next;
44483+
44484+ if (match != NULL)
44485+ return match->kernel;
44486+ else
44487+ return NULL;
44488+}
44489+
44490+static void
44491+insert_subj_map_entry(struct subject_map *subjmap)
44492+{
44493+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
44494+ struct subject_map **curr;
44495+
44496+ subjmap->prev = NULL;
44497+
44498+ curr = &subj_map_set.s_hash[index];
44499+ if (*curr != NULL)
44500+ (*curr)->prev = subjmap;
44501+
44502+ subjmap->next = *curr;
44503+ *curr = subjmap;
44504+
44505+ return;
44506+}
44507+
44508+static struct acl_role_label *
44509+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
44510+ const gid_t gid)
44511+{
44512+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
44513+ struct acl_role_label *match;
44514+ struct role_allowed_ip *ipp;
44515+ unsigned int x;
44516+ u32 curr_ip = task->signal->curr_ip;
44517+
44518+ task->signal->saved_ip = curr_ip;
44519+
44520+ match = acl_role_set.r_hash[index];
44521+
44522+ while (match) {
44523+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
44524+ for (x = 0; x < match->domain_child_num; x++) {
44525+ if (match->domain_children[x] == uid)
44526+ goto found;
44527+ }
44528+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
44529+ break;
44530+ match = match->next;
44531+ }
44532+found:
44533+ if (match == NULL) {
44534+ try_group:
44535+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
44536+ match = acl_role_set.r_hash[index];
44537+
44538+ while (match) {
44539+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
44540+ for (x = 0; x < match->domain_child_num; x++) {
44541+ if (match->domain_children[x] == gid)
44542+ goto found2;
44543+ }
44544+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
44545+ break;
44546+ match = match->next;
44547+ }
44548+found2:
44549+ if (match == NULL)
44550+ match = default_role;
44551+ if (match->allowed_ips == NULL)
44552+ return match;
44553+ else {
44554+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
44555+ if (likely
44556+ ((ntohl(curr_ip) & ipp->netmask) ==
44557+ (ntohl(ipp->addr) & ipp->netmask)))
44558+ return match;
44559+ }
44560+ match = default_role;
44561+ }
44562+ } else if (match->allowed_ips == NULL) {
44563+ return match;
44564+ } else {
44565+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
44566+ if (likely
44567+ ((ntohl(curr_ip) & ipp->netmask) ==
44568+ (ntohl(ipp->addr) & ipp->netmask)))
44569+ return match;
44570+ }
44571+ goto try_group;
44572+ }
44573+
44574+ return match;
44575+}
44576+
44577+struct acl_subject_label *
44578+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
44579+ const struct acl_role_label *role)
44580+{
44581+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
44582+ struct acl_subject_label *match;
44583+
44584+ match = role->subj_hash[index];
44585+
44586+ while (match && (match->inode != ino || match->device != dev ||
44587+ (match->mode & GR_DELETED))) {
44588+ match = match->next;
44589+ }
44590+
44591+ if (match && !(match->mode & GR_DELETED))
44592+ return match;
44593+ else
44594+ return NULL;
44595+}
44596+
44597+struct acl_subject_label *
44598+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
44599+ const struct acl_role_label *role)
44600+{
44601+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
44602+ struct acl_subject_label *match;
44603+
44604+ match = role->subj_hash[index];
44605+
44606+ while (match && (match->inode != ino || match->device != dev ||
44607+ !(match->mode & GR_DELETED))) {
44608+ match = match->next;
44609+ }
44610+
44611+ if (match && (match->mode & GR_DELETED))
44612+ return match;
44613+ else
44614+ return NULL;
44615+}
44616+
44617+static struct acl_object_label *
44618+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
44619+ const struct acl_subject_label *subj)
44620+{
44621+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
44622+ struct acl_object_label *match;
44623+
44624+ match = subj->obj_hash[index];
44625+
44626+ while (match && (match->inode != ino || match->device != dev ||
44627+ (match->mode & GR_DELETED))) {
44628+ match = match->next;
44629+ }
44630+
44631+ if (match && !(match->mode & GR_DELETED))
44632+ return match;
44633+ else
44634+ return NULL;
44635+}
44636+
44637+static struct acl_object_label *
44638+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
44639+ const struct acl_subject_label *subj)
44640+{
44641+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
44642+ struct acl_object_label *match;
44643+
44644+ match = subj->obj_hash[index];
44645+
44646+ while (match && (match->inode != ino || match->device != dev ||
44647+ !(match->mode & GR_DELETED))) {
44648+ match = match->next;
44649+ }
44650+
44651+ if (match && (match->mode & GR_DELETED))
44652+ return match;
44653+
44654+ match = subj->obj_hash[index];
44655+
44656+ while (match && (match->inode != ino || match->device != dev ||
44657+ (match->mode & GR_DELETED))) {
44658+ match = match->next;
44659+ }
44660+
44661+ if (match && !(match->mode & GR_DELETED))
44662+ return match;
44663+ else
44664+ return NULL;
44665+}
44666+
44667+static struct name_entry *
44668+lookup_name_entry(const char *name)
44669+{
44670+ unsigned int len = strlen(name);
44671+ unsigned int key = full_name_hash(name, len);
44672+ unsigned int index = key % name_set.n_size;
44673+ struct name_entry *match;
44674+
44675+ match = name_set.n_hash[index];
44676+
44677+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
44678+ match = match->next;
44679+
44680+ return match;
44681+}
44682+
44683+static struct name_entry *
44684+lookup_name_entry_create(const char *name)
44685+{
44686+ unsigned int len = strlen(name);
44687+ unsigned int key = full_name_hash(name, len);
44688+ unsigned int index = key % name_set.n_size;
44689+ struct name_entry *match;
44690+
44691+ match = name_set.n_hash[index];
44692+
44693+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
44694+ !match->deleted))
44695+ match = match->next;
44696+
44697+ if (match && match->deleted)
44698+ return match;
44699+
44700+ match = name_set.n_hash[index];
44701+
44702+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
44703+ match->deleted))
44704+ match = match->next;
44705+
44706+ if (match && !match->deleted)
44707+ return match;
44708+ else
44709+ return NULL;
44710+}
44711+
44712+static struct inodev_entry *
44713+lookup_inodev_entry(const ino_t ino, const dev_t dev)
44714+{
44715+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
44716+ struct inodev_entry *match;
44717+
44718+ match = inodev_set.i_hash[index];
44719+
44720+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
44721+ match = match->next;
44722+
44723+ return match;
44724+}
44725+
44726+static void
44727+insert_inodev_entry(struct inodev_entry *entry)
44728+{
44729+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
44730+ inodev_set.i_size);
44731+ struct inodev_entry **curr;
44732+
44733+ entry->prev = NULL;
44734+
44735+ curr = &inodev_set.i_hash[index];
44736+ if (*curr != NULL)
44737+ (*curr)->prev = entry;
44738+
44739+ entry->next = *curr;
44740+ *curr = entry;
44741+
44742+ return;
44743+}
44744+
44745+static void
44746+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
44747+{
44748+ unsigned int index =
44749+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
44750+ struct acl_role_label **curr;
44751+ struct acl_role_label *tmp;
44752+
44753+ curr = &acl_role_set.r_hash[index];
44754+
44755+ /* if role was already inserted due to domains and already has
44756+ a role in the same bucket as it attached, then we need to
44757+ combine these two buckets
44758+ */
44759+ if (role->next) {
44760+ tmp = role->next;
44761+ while (tmp->next)
44762+ tmp = tmp->next;
44763+ tmp->next = *curr;
44764+ } else
44765+ role->next = *curr;
44766+ *curr = role;
44767+
44768+ return;
44769+}
44770+
44771+static void
44772+insert_acl_role_label(struct acl_role_label *role)
44773+{
44774+ int i;
44775+
44776+ if (role_list == NULL) {
44777+ role_list = role;
44778+ role->prev = NULL;
44779+ } else {
44780+ role->prev = role_list;
44781+ role_list = role;
44782+ }
44783+
44784+ /* used for hash chains */
44785+ role->next = NULL;
44786+
44787+ if (role->roletype & GR_ROLE_DOMAIN) {
44788+ for (i = 0; i < role->domain_child_num; i++)
44789+ __insert_acl_role_label(role, role->domain_children[i]);
44790+ } else
44791+ __insert_acl_role_label(role, role->uidgid);
44792+}
44793+
44794+static int
44795+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
44796+{
44797+ struct name_entry **curr, *nentry;
44798+ struct inodev_entry *ientry;
44799+ unsigned int len = strlen(name);
44800+ unsigned int key = full_name_hash(name, len);
44801+ unsigned int index = key % name_set.n_size;
44802+
44803+ curr = &name_set.n_hash[index];
44804+
44805+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
44806+ curr = &((*curr)->next);
44807+
44808+ if (*curr != NULL)
44809+ return 1;
44810+
44811+ nentry = acl_alloc(sizeof (struct name_entry));
44812+ if (nentry == NULL)
44813+ return 0;
44814+ ientry = acl_alloc(sizeof (struct inodev_entry));
44815+ if (ientry == NULL)
44816+ return 0;
44817+ ientry->nentry = nentry;
44818+
44819+ nentry->key = key;
44820+ nentry->name = name;
44821+ nentry->inode = inode;
44822+ nentry->device = device;
44823+ nentry->len = len;
44824+ nentry->deleted = deleted;
44825+
44826+ nentry->prev = NULL;
44827+ curr = &name_set.n_hash[index];
44828+ if (*curr != NULL)
44829+ (*curr)->prev = nentry;
44830+ nentry->next = *curr;
44831+ *curr = nentry;
44832+
44833+ /* insert us into the table searchable by inode/dev */
44834+ insert_inodev_entry(ientry);
44835+
44836+ return 1;
44837+}
44838+
44839+static void
44840+insert_acl_obj_label(struct acl_object_label *obj,
44841+ struct acl_subject_label *subj)
44842+{
44843+ unsigned int index =
44844+ fhash(obj->inode, obj->device, subj->obj_hash_size);
44845+ struct acl_object_label **curr;
44846+
44847+
44848+ obj->prev = NULL;
44849+
44850+ curr = &subj->obj_hash[index];
44851+ if (*curr != NULL)
44852+ (*curr)->prev = obj;
44853+
44854+ obj->next = *curr;
44855+ *curr = obj;
44856+
44857+ return;
44858+}
44859+
44860+static void
44861+insert_acl_subj_label(struct acl_subject_label *obj,
44862+ struct acl_role_label *role)
44863+{
44864+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
44865+ struct acl_subject_label **curr;
44866+
44867+ obj->prev = NULL;
44868+
44869+ curr = &role->subj_hash[index];
44870+ if (*curr != NULL)
44871+ (*curr)->prev = obj;
44872+
44873+ obj->next = *curr;
44874+ *curr = obj;
44875+
44876+ return;
44877+}
44878+
44879+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
44880+
44881+static void *
44882+create_table(__u32 * len, int elementsize)
44883+{
44884+ unsigned int table_sizes[] = {
44885+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
44886+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
44887+ 4194301, 8388593, 16777213, 33554393, 67108859
44888+ };
44889+ void *newtable = NULL;
44890+ unsigned int pwr = 0;
44891+
44892+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
44893+ table_sizes[pwr] <= *len)
44894+ pwr++;
44895+
44896+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
44897+ return newtable;
44898+
44899+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
44900+ newtable =
44901+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
44902+ else
44903+ newtable = vmalloc(table_sizes[pwr] * elementsize);
44904+
44905+ *len = table_sizes[pwr];
44906+
44907+ return newtable;
44908+}
44909+
44910+static int
44911+init_variables(const struct gr_arg *arg)
44912+{
44913+ struct task_struct *reaper = &init_task;
44914+ unsigned int stacksize;
44915+
44916+ subj_map_set.s_size = arg->role_db.num_subjects;
44917+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
44918+ name_set.n_size = arg->role_db.num_objects;
44919+ inodev_set.i_size = arg->role_db.num_objects;
44920+
44921+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
44922+ !name_set.n_size || !inodev_set.i_size)
44923+ return 1;
44924+
44925+ if (!gr_init_uidset())
44926+ return 1;
44927+
44928+ /* set up the stack that holds allocation info */
44929+
44930+ stacksize = arg->role_db.num_pointers + 5;
44931+
44932+ if (!acl_alloc_stack_init(stacksize))
44933+ return 1;
44934+
44935+ /* grab reference for the real root dentry and vfsmount */
44936+ get_fs_root(reaper->fs, &real_root);
44937+
44938+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44939+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
44940+#endif
44941+
44942+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
44943+ if (fakefs_obj_rw == NULL)
44944+ return 1;
44945+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
44946+
44947+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
44948+ if (fakefs_obj_rwx == NULL)
44949+ return 1;
44950+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
44951+
44952+ subj_map_set.s_hash =
44953+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
44954+ acl_role_set.r_hash =
44955+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
44956+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
44957+ inodev_set.i_hash =
44958+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
44959+
44960+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
44961+ !name_set.n_hash || !inodev_set.i_hash)
44962+ return 1;
44963+
44964+ memset(subj_map_set.s_hash, 0,
44965+ sizeof(struct subject_map *) * subj_map_set.s_size);
44966+ memset(acl_role_set.r_hash, 0,
44967+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
44968+ memset(name_set.n_hash, 0,
44969+ sizeof (struct name_entry *) * name_set.n_size);
44970+ memset(inodev_set.i_hash, 0,
44971+ sizeof (struct inodev_entry *) * inodev_set.i_size);
44972+
44973+ return 0;
44974+}
44975+
44976+/* free information not needed after startup
44977+ currently contains user->kernel pointer mappings for subjects
44978+*/
44979+
44980+static void
44981+free_init_variables(void)
44982+{
44983+ __u32 i;
44984+
44985+ if (subj_map_set.s_hash) {
44986+ for (i = 0; i < subj_map_set.s_size; i++) {
44987+ if (subj_map_set.s_hash[i]) {
44988+ kfree(subj_map_set.s_hash[i]);
44989+ subj_map_set.s_hash[i] = NULL;
44990+ }
44991+ }
44992+
44993+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
44994+ PAGE_SIZE)
44995+ kfree(subj_map_set.s_hash);
44996+ else
44997+ vfree(subj_map_set.s_hash);
44998+ }
44999+
45000+ return;
45001+}
45002+
45003+static void
45004+free_variables(void)
45005+{
45006+ struct acl_subject_label *s;
45007+ struct acl_role_label *r;
45008+ struct task_struct *task, *task2;
45009+ unsigned int x;
45010+
45011+ gr_clear_learn_entries();
45012+
45013+ read_lock(&tasklist_lock);
45014+ do_each_thread(task2, task) {
45015+ task->acl_sp_role = 0;
45016+ task->acl_role_id = 0;
45017+ task->acl = NULL;
45018+ task->role = NULL;
45019+ } while_each_thread(task2, task);
45020+ read_unlock(&tasklist_lock);
45021+
45022+ /* release the reference to the real root dentry and vfsmount */
45023+ path_put(&real_root);
45024+
45025+ /* free all object hash tables */
45026+
45027+ FOR_EACH_ROLE_START(r)
45028+ if (r->subj_hash == NULL)
45029+ goto next_role;
45030+ FOR_EACH_SUBJECT_START(r, s, x)
45031+ if (s->obj_hash == NULL)
45032+ break;
45033+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
45034+ kfree(s->obj_hash);
45035+ else
45036+ vfree(s->obj_hash);
45037+ FOR_EACH_SUBJECT_END(s, x)
45038+ FOR_EACH_NESTED_SUBJECT_START(r, s)
45039+ if (s->obj_hash == NULL)
45040+ break;
45041+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
45042+ kfree(s->obj_hash);
45043+ else
45044+ vfree(s->obj_hash);
45045+ FOR_EACH_NESTED_SUBJECT_END(s)
45046+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
45047+ kfree(r->subj_hash);
45048+ else
45049+ vfree(r->subj_hash);
45050+ r->subj_hash = NULL;
45051+next_role:
45052+ FOR_EACH_ROLE_END(r)
45053+
45054+ acl_free_all();
45055+
45056+ if (acl_role_set.r_hash) {
45057+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
45058+ PAGE_SIZE)
45059+ kfree(acl_role_set.r_hash);
45060+ else
45061+ vfree(acl_role_set.r_hash);
45062+ }
45063+ if (name_set.n_hash) {
45064+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
45065+ PAGE_SIZE)
45066+ kfree(name_set.n_hash);
45067+ else
45068+ vfree(name_set.n_hash);
45069+ }
45070+
45071+ if (inodev_set.i_hash) {
45072+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
45073+ PAGE_SIZE)
45074+ kfree(inodev_set.i_hash);
45075+ else
45076+ vfree(inodev_set.i_hash);
45077+ }
45078+
45079+ gr_free_uidset();
45080+
45081+ memset(&name_set, 0, sizeof (struct name_db));
45082+ memset(&inodev_set, 0, sizeof (struct inodev_db));
45083+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
45084+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
45085+
45086+ default_role = NULL;
45087+ role_list = NULL;
45088+
45089+ return;
45090+}
45091+
45092+static __u32
45093+count_user_objs(struct acl_object_label *userp)
45094+{
45095+ struct acl_object_label o_tmp;
45096+ __u32 num = 0;
45097+
45098+ while (userp) {
45099+ if (copy_from_user(&o_tmp, userp,
45100+ sizeof (struct acl_object_label)))
45101+ break;
45102+
45103+ userp = o_tmp.prev;
45104+ num++;
45105+ }
45106+
45107+ return num;
45108+}
45109+
45110+static struct acl_subject_label *
45111+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
45112+
45113+static int
45114+copy_user_glob(struct acl_object_label *obj)
45115+{
45116+ struct acl_object_label *g_tmp, **guser;
45117+ unsigned int len;
45118+ char *tmp;
45119+
45120+ if (obj->globbed == NULL)
45121+ return 0;
45122+
45123+ guser = &obj->globbed;
45124+ while (*guser) {
45125+ g_tmp = (struct acl_object_label *)
45126+ acl_alloc(sizeof (struct acl_object_label));
45127+ if (g_tmp == NULL)
45128+ return -ENOMEM;
45129+
45130+ if (copy_from_user(g_tmp, *guser,
45131+ sizeof (struct acl_object_label)))
45132+ return -EFAULT;
45133+
45134+ len = strnlen_user(g_tmp->filename, PATH_MAX);
45135+
45136+ if (!len || len >= PATH_MAX)
45137+ return -EINVAL;
45138+
45139+ if ((tmp = (char *) acl_alloc(len)) == NULL)
45140+ return -ENOMEM;
45141+
45142+ if (copy_from_user(tmp, g_tmp->filename, len))
45143+ return -EFAULT;
45144+ tmp[len-1] = '\0';
45145+ g_tmp->filename = tmp;
45146+
45147+ *guser = g_tmp;
45148+ guser = &(g_tmp->next);
45149+ }
45150+
45151+ return 0;
45152+}
45153+
45154+static int
45155+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
45156+ struct acl_role_label *role)
45157+{
45158+ struct acl_object_label *o_tmp;
45159+ unsigned int len;
45160+ int ret;
45161+ char *tmp;
45162+
45163+ while (userp) {
45164+ if ((o_tmp = (struct acl_object_label *)
45165+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
45166+ return -ENOMEM;
45167+
45168+ if (copy_from_user(o_tmp, userp,
45169+ sizeof (struct acl_object_label)))
45170+ return -EFAULT;
45171+
45172+ userp = o_tmp->prev;
45173+
45174+ len = strnlen_user(o_tmp->filename, PATH_MAX);
45175+
45176+ if (!len || len >= PATH_MAX)
45177+ return -EINVAL;
45178+
45179+ if ((tmp = (char *) acl_alloc(len)) == NULL)
45180+ return -ENOMEM;
45181+
45182+ if (copy_from_user(tmp, o_tmp->filename, len))
45183+ return -EFAULT;
45184+ tmp[len-1] = '\0';
45185+ o_tmp->filename = tmp;
45186+
45187+ insert_acl_obj_label(o_tmp, subj);
45188+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
45189+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
45190+ return -ENOMEM;
45191+
45192+ ret = copy_user_glob(o_tmp);
45193+ if (ret)
45194+ return ret;
45195+
45196+ if (o_tmp->nested) {
45197+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
45198+ if (IS_ERR(o_tmp->nested))
45199+ return PTR_ERR(o_tmp->nested);
45200+
45201+ /* insert into nested subject list */
45202+ o_tmp->nested->next = role->hash->first;
45203+ role->hash->first = o_tmp->nested;
45204+ }
45205+ }
45206+
45207+ return 0;
45208+}
45209+
45210+static __u32
45211+count_user_subjs(struct acl_subject_label *userp)
45212+{
45213+ struct acl_subject_label s_tmp;
45214+ __u32 num = 0;
45215+
45216+ while (userp) {
45217+ if (copy_from_user(&s_tmp, userp,
45218+ sizeof (struct acl_subject_label)))
45219+ break;
45220+
45221+ userp = s_tmp.prev;
45222+ /* do not count nested subjects against this count, since
45223+ they are not included in the hash table, but are
45224+ attached to objects. We have already counted
45225+ the subjects in userspace for the allocation
45226+ stack
45227+ */
45228+ if (!(s_tmp.mode & GR_NESTED))
45229+ num++;
45230+ }
45231+
45232+ return num;
45233+}
45234+
45235+static int
45236+copy_user_allowedips(struct acl_role_label *rolep)
45237+{
45238+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
45239+
45240+ ruserip = rolep->allowed_ips;
45241+
45242+ while (ruserip) {
45243+ rlast = rtmp;
45244+
45245+ if ((rtmp = (struct role_allowed_ip *)
45246+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
45247+ return -ENOMEM;
45248+
45249+ if (copy_from_user(rtmp, ruserip,
45250+ sizeof (struct role_allowed_ip)))
45251+ return -EFAULT;
45252+
45253+ ruserip = rtmp->prev;
45254+
45255+ if (!rlast) {
45256+ rtmp->prev = NULL;
45257+ rolep->allowed_ips = rtmp;
45258+ } else {
45259+ rlast->next = rtmp;
45260+ rtmp->prev = rlast;
45261+ }
45262+
45263+ if (!ruserip)
45264+ rtmp->next = NULL;
45265+ }
45266+
45267+ return 0;
45268+}
45269+
45270+static int
45271+copy_user_transitions(struct acl_role_label *rolep)
45272+{
45273+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
45274+
45275+ unsigned int len;
45276+ char *tmp;
45277+
45278+ rusertp = rolep->transitions;
45279+
45280+ while (rusertp) {
45281+ rlast = rtmp;
45282+
45283+ if ((rtmp = (struct role_transition *)
45284+ acl_alloc(sizeof (struct role_transition))) == NULL)
45285+ return -ENOMEM;
45286+
45287+ if (copy_from_user(rtmp, rusertp,
45288+ sizeof (struct role_transition)))
45289+ return -EFAULT;
45290+
45291+ rusertp = rtmp->prev;
45292+
45293+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
45294+
45295+ if (!len || len >= GR_SPROLE_LEN)
45296+ return -EINVAL;
45297+
45298+ if ((tmp = (char *) acl_alloc(len)) == NULL)
45299+ return -ENOMEM;
45300+
45301+ if (copy_from_user(tmp, rtmp->rolename, len))
45302+ return -EFAULT;
45303+ tmp[len-1] = '\0';
45304+ rtmp->rolename = tmp;
45305+
45306+ if (!rlast) {
45307+ rtmp->prev = NULL;
45308+ rolep->transitions = rtmp;
45309+ } else {
45310+ rlast->next = rtmp;
45311+ rtmp->prev = rlast;
45312+ }
45313+
45314+ if (!rusertp)
45315+ rtmp->next = NULL;
45316+ }
45317+
45318+ return 0;
45319+}
45320+
45321+static struct acl_subject_label *
45322+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
45323+{
45324+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
45325+ unsigned int len;
45326+ char *tmp;
45327+ __u32 num_objs;
45328+ struct acl_ip_label **i_tmp, *i_utmp2;
45329+ struct gr_hash_struct ghash;
45330+ struct subject_map *subjmap;
45331+ unsigned int i_num;
45332+ int err;
45333+
45334+ s_tmp = lookup_subject_map(userp);
45335+
45336+ /* we've already copied this subject into the kernel, just return
45337+ the reference to it, and don't copy it over again
45338+ */
45339+ if (s_tmp)
45340+ return(s_tmp);
45341+
45342+ if ((s_tmp = (struct acl_subject_label *)
45343+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
45344+ return ERR_PTR(-ENOMEM);
45345+
45346+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
45347+ if (subjmap == NULL)
45348+ return ERR_PTR(-ENOMEM);
45349+
45350+ subjmap->user = userp;
45351+ subjmap->kernel = s_tmp;
45352+ insert_subj_map_entry(subjmap);
45353+
45354+ if (copy_from_user(s_tmp, userp,
45355+ sizeof (struct acl_subject_label)))
45356+ return ERR_PTR(-EFAULT);
45357+
45358+ len = strnlen_user(s_tmp->filename, PATH_MAX);
45359+
45360+ if (!len || len >= PATH_MAX)
45361+ return ERR_PTR(-EINVAL);
45362+
45363+ if ((tmp = (char *) acl_alloc(len)) == NULL)
45364+ return ERR_PTR(-ENOMEM);
45365+
45366+ if (copy_from_user(tmp, s_tmp->filename, len))
45367+ return ERR_PTR(-EFAULT);
45368+ tmp[len-1] = '\0';
45369+ s_tmp->filename = tmp;
45370+
45371+ if (!strcmp(s_tmp->filename, "/"))
45372+ role->root_label = s_tmp;
45373+
45374+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
45375+ return ERR_PTR(-EFAULT);
45376+
45377+ /* copy user and group transition tables */
45378+
45379+ if (s_tmp->user_trans_num) {
45380+ uid_t *uidlist;
45381+
45382+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
45383+ if (uidlist == NULL)
45384+ return ERR_PTR(-ENOMEM);
45385+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
45386+ return ERR_PTR(-EFAULT);
45387+
45388+ s_tmp->user_transitions = uidlist;
45389+ }
45390+
45391+ if (s_tmp->group_trans_num) {
45392+ gid_t *gidlist;
45393+
45394+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
45395+ if (gidlist == NULL)
45396+ return ERR_PTR(-ENOMEM);
45397+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
45398+ return ERR_PTR(-EFAULT);
45399+
45400+ s_tmp->group_transitions = gidlist;
45401+ }
45402+
45403+ /* set up object hash table */
45404+ num_objs = count_user_objs(ghash.first);
45405+
45406+ s_tmp->obj_hash_size = num_objs;
45407+ s_tmp->obj_hash =
45408+ (struct acl_object_label **)
45409+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
45410+
45411+ if (!s_tmp->obj_hash)
45412+ return ERR_PTR(-ENOMEM);
45413+
45414+ memset(s_tmp->obj_hash, 0,
45415+ s_tmp->obj_hash_size *
45416+ sizeof (struct acl_object_label *));
45417+
45418+ /* add in objects */
45419+ err = copy_user_objs(ghash.first, s_tmp, role);
45420+
45421+ if (err)
45422+ return ERR_PTR(err);
45423+
45424+ /* set pointer for parent subject */
45425+ if (s_tmp->parent_subject) {
45426+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
45427+
45428+ if (IS_ERR(s_tmp2))
45429+ return s_tmp2;
45430+
45431+ s_tmp->parent_subject = s_tmp2;
45432+ }
45433+
45434+ /* add in ip acls */
45435+
45436+ if (!s_tmp->ip_num) {
45437+ s_tmp->ips = NULL;
45438+ goto insert;
45439+ }
45440+
45441+ i_tmp =
45442+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
45443+ sizeof (struct acl_ip_label *));
45444+
45445+ if (!i_tmp)
45446+ return ERR_PTR(-ENOMEM);
45447+
45448+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
45449+ *(i_tmp + i_num) =
45450+ (struct acl_ip_label *)
45451+ acl_alloc(sizeof (struct acl_ip_label));
45452+ if (!*(i_tmp + i_num))
45453+ return ERR_PTR(-ENOMEM);
45454+
45455+ if (copy_from_user
45456+ (&i_utmp2, s_tmp->ips + i_num,
45457+ sizeof (struct acl_ip_label *)))
45458+ return ERR_PTR(-EFAULT);
45459+
45460+ if (copy_from_user
45461+ (*(i_tmp + i_num), i_utmp2,
45462+ sizeof (struct acl_ip_label)))
45463+ return ERR_PTR(-EFAULT);
45464+
45465+ if ((*(i_tmp + i_num))->iface == NULL)
45466+ continue;
45467+
45468+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
45469+ if (!len || len >= IFNAMSIZ)
45470+ return ERR_PTR(-EINVAL);
45471+ tmp = acl_alloc(len);
45472+ if (tmp == NULL)
45473+ return ERR_PTR(-ENOMEM);
45474+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
45475+ return ERR_PTR(-EFAULT);
45476+ (*(i_tmp + i_num))->iface = tmp;
45477+ }
45478+
45479+ s_tmp->ips = i_tmp;
45480+
45481+insert:
45482+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
45483+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
45484+ return ERR_PTR(-ENOMEM);
45485+
45486+ return s_tmp;
45487+}
45488+
45489+static int
45490+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
45491+{
45492+ struct acl_subject_label s_pre;
45493+ struct acl_subject_label * ret;
45494+ int err;
45495+
45496+ while (userp) {
45497+ if (copy_from_user(&s_pre, userp,
45498+ sizeof (struct acl_subject_label)))
45499+ return -EFAULT;
45500+
45501+ /* do not add nested subjects here, add
45502+ while parsing objects
45503+ */
45504+
45505+ if (s_pre.mode & GR_NESTED) {
45506+ userp = s_pre.prev;
45507+ continue;
45508+ }
45509+
45510+ ret = do_copy_user_subj(userp, role);
45511+
45512+ err = PTR_ERR(ret);
45513+ if (IS_ERR(ret))
45514+ return err;
45515+
45516+ insert_acl_subj_label(ret, role);
45517+
45518+ userp = s_pre.prev;
45519+ }
45520+
45521+ return 0;
45522+}
45523+
45524+static int
45525+copy_user_acl(struct gr_arg *arg)
45526+{
45527+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
45528+ struct sprole_pw *sptmp;
45529+ struct gr_hash_struct *ghash;
45530+ uid_t *domainlist;
45531+ unsigned int r_num;
45532+ unsigned int len;
45533+ char *tmp;
45534+ int err = 0;
45535+ __u16 i;
45536+ __u32 num_subjs;
45537+
45538+ /* we need a default and kernel role */
45539+ if (arg->role_db.num_roles < 2)
45540+ return -EINVAL;
45541+
45542+ /* copy special role authentication info from userspace */
45543+
45544+ num_sprole_pws = arg->num_sprole_pws;
45545+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
45546+
45547+ if (!acl_special_roles) {
45548+ err = -ENOMEM;
45549+ goto cleanup;
45550+ }
45551+
45552+ for (i = 0; i < num_sprole_pws; i++) {
45553+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
45554+ if (!sptmp) {
45555+ err = -ENOMEM;
45556+ goto cleanup;
45557+ }
45558+ if (copy_from_user(sptmp, arg->sprole_pws + i,
45559+ sizeof (struct sprole_pw))) {
45560+ err = -EFAULT;
45561+ goto cleanup;
45562+ }
45563+
45564+ len =
45565+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
45566+
45567+ if (!len || len >= GR_SPROLE_LEN) {
45568+ err = -EINVAL;
45569+ goto cleanup;
45570+ }
45571+
45572+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
45573+ err = -ENOMEM;
45574+ goto cleanup;
45575+ }
45576+
45577+ if (copy_from_user(tmp, sptmp->rolename, len)) {
45578+ err = -EFAULT;
45579+ goto cleanup;
45580+ }
45581+ tmp[len-1] = '\0';
45582+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
45583+ printk(KERN_ALERT "Copying special role %s\n", tmp);
45584+#endif
45585+ sptmp->rolename = tmp;
45586+ acl_special_roles[i] = sptmp;
45587+ }
45588+
45589+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
45590+
45591+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
45592+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
45593+
45594+ if (!r_tmp) {
45595+ err = -ENOMEM;
45596+ goto cleanup;
45597+ }
45598+
45599+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
45600+ sizeof (struct acl_role_label *))) {
45601+ err = -EFAULT;
45602+ goto cleanup;
45603+ }
45604+
45605+ if (copy_from_user(r_tmp, r_utmp2,
45606+ sizeof (struct acl_role_label))) {
45607+ err = -EFAULT;
45608+ goto cleanup;
45609+ }
45610+
45611+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
45612+
45613+ if (!len || len >= PATH_MAX) {
45614+ err = -EINVAL;
45615+ goto cleanup;
45616+ }
45617+
45618+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
45619+ err = -ENOMEM;
45620+ goto cleanup;
45621+ }
45622+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
45623+ err = -EFAULT;
45624+ goto cleanup;
45625+ }
45626+ tmp[len-1] = '\0';
45627+ r_tmp->rolename = tmp;
45628+
45629+ if (!strcmp(r_tmp->rolename, "default")
45630+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
45631+ default_role = r_tmp;
45632+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
45633+ kernel_role = r_tmp;
45634+ }
45635+
45636+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
45637+ err = -ENOMEM;
45638+ goto cleanup;
45639+ }
45640+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
45641+ err = -EFAULT;
45642+ goto cleanup;
45643+ }
45644+
45645+ r_tmp->hash = ghash;
45646+
45647+ num_subjs = count_user_subjs(r_tmp->hash->first);
45648+
45649+ r_tmp->subj_hash_size = num_subjs;
45650+ r_tmp->subj_hash =
45651+ (struct acl_subject_label **)
45652+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
45653+
45654+ if (!r_tmp->subj_hash) {
45655+ err = -ENOMEM;
45656+ goto cleanup;
45657+ }
45658+
45659+ err = copy_user_allowedips(r_tmp);
45660+ if (err)
45661+ goto cleanup;
45662+
45663+ /* copy domain info */
45664+ if (r_tmp->domain_children != NULL) {
45665+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
45666+ if (domainlist == NULL) {
45667+ err = -ENOMEM;
45668+ goto cleanup;
45669+ }
45670+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
45671+ err = -EFAULT;
45672+ goto cleanup;
45673+ }
45674+ r_tmp->domain_children = domainlist;
45675+ }
45676+
45677+ err = copy_user_transitions(r_tmp);
45678+ if (err)
45679+ goto cleanup;
45680+
45681+ memset(r_tmp->subj_hash, 0,
45682+ r_tmp->subj_hash_size *
45683+ sizeof (struct acl_subject_label *));
45684+
45685+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
45686+
45687+ if (err)
45688+ goto cleanup;
45689+
45690+ /* set nested subject list to null */
45691+ r_tmp->hash->first = NULL;
45692+
45693+ insert_acl_role_label(r_tmp);
45694+ }
45695+
45696+ goto return_err;
45697+ cleanup:
45698+ free_variables();
45699+ return_err:
45700+ return err;
45701+
45702+}
45703+
45704+static int
45705+gracl_init(struct gr_arg *args)
45706+{
45707+ int error = 0;
45708+
45709+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
45710+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
45711+
45712+ if (init_variables(args)) {
45713+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
45714+ error = -ENOMEM;
45715+ free_variables();
45716+ goto out;
45717+ }
45718+
45719+ error = copy_user_acl(args);
45720+ free_init_variables();
45721+ if (error) {
45722+ free_variables();
45723+ goto out;
45724+ }
45725+
45726+ if ((error = gr_set_acls(0))) {
45727+ free_variables();
45728+ goto out;
45729+ }
45730+
45731+ pax_open_kernel();
45732+ gr_status |= GR_READY;
45733+ pax_close_kernel();
45734+
45735+ out:
45736+ return error;
45737+}
45738+
45739+/* derived from glibc fnmatch() 0: match, 1: no match*/
45740+
45741+static int
45742+glob_match(const char *p, const char *n)
45743+{
45744+ char c;
45745+
45746+ while ((c = *p++) != '\0') {
45747+ switch (c) {
45748+ case '?':
45749+ if (*n == '\0')
45750+ return 1;
45751+ else if (*n == '/')
45752+ return 1;
45753+ break;
45754+ case '\\':
45755+ if (*n != c)
45756+ return 1;
45757+ break;
45758+ case '*':
45759+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
45760+ if (*n == '/')
45761+ return 1;
45762+ else if (c == '?') {
45763+ if (*n == '\0')
45764+ return 1;
45765+ else
45766+ ++n;
45767+ }
45768+ }
45769+ if (c == '\0') {
45770+ return 0;
45771+ } else {
45772+ const char *endp;
45773+
45774+ if ((endp = strchr(n, '/')) == NULL)
45775+ endp = n + strlen(n);
45776+
45777+ if (c == '[') {
45778+ for (--p; n < endp; ++n)
45779+ if (!glob_match(p, n))
45780+ return 0;
45781+ } else if (c == '/') {
45782+ while (*n != '\0' && *n != '/')
45783+ ++n;
45784+ if (*n == '/' && !glob_match(p, n + 1))
45785+ return 0;
45786+ } else {
45787+ for (--p; n < endp; ++n)
45788+ if (*n == c && !glob_match(p, n))
45789+ return 0;
45790+ }
45791+
45792+ return 1;
45793+ }
45794+ case '[':
45795+ {
45796+ int not;
45797+ char cold;
45798+
45799+ if (*n == '\0' || *n == '/')
45800+ return 1;
45801+
45802+ not = (*p == '!' || *p == '^');
45803+ if (not)
45804+ ++p;
45805+
45806+ c = *p++;
45807+ for (;;) {
45808+ unsigned char fn = (unsigned char)*n;
45809+
45810+ if (c == '\0')
45811+ return 1;
45812+ else {
45813+ if (c == fn)
45814+ goto matched;
45815+ cold = c;
45816+ c = *p++;
45817+
45818+ if (c == '-' && *p != ']') {
45819+ unsigned char cend = *p++;
45820+
45821+ if (cend == '\0')
45822+ return 1;
45823+
45824+ if (cold <= fn && fn <= cend)
45825+ goto matched;
45826+
45827+ c = *p++;
45828+ }
45829+ }
45830+
45831+ if (c == ']')
45832+ break;
45833+ }
45834+ if (!not)
45835+ return 1;
45836+ break;
45837+ matched:
45838+ while (c != ']') {
45839+ if (c == '\0')
45840+ return 1;
45841+
45842+ c = *p++;
45843+ }
45844+ if (not)
45845+ return 1;
45846+ }
45847+ break;
45848+ default:
45849+ if (c != *n)
45850+ return 1;
45851+ }
45852+
45853+ ++n;
45854+ }
45855+
45856+ if (*n == '\0')
45857+ return 0;
45858+
45859+ if (*n == '/')
45860+ return 0;
45861+
45862+ return 1;
45863+}
45864+
45865+static struct acl_object_label *
45866+chk_glob_label(struct acl_object_label *globbed,
45867+ struct dentry *dentry, struct vfsmount *mnt, char **path)
45868+{
45869+ struct acl_object_label *tmp;
45870+
45871+ if (*path == NULL)
45872+ *path = gr_to_filename_nolock(dentry, mnt);
45873+
45874+ tmp = globbed;
45875+
45876+ while (tmp) {
45877+ if (!glob_match(tmp->filename, *path))
45878+ return tmp;
45879+ tmp = tmp->next;
45880+ }
45881+
45882+ return NULL;
45883+}
45884+
45885+static struct acl_object_label *
45886+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
45887+ const ino_t curr_ino, const dev_t curr_dev,
45888+ const struct acl_subject_label *subj, char **path, const int checkglob)
45889+{
45890+ struct acl_subject_label *tmpsubj;
45891+ struct acl_object_label *retval;
45892+ struct acl_object_label *retval2;
45893+
45894+ tmpsubj = (struct acl_subject_label *) subj;
45895+ read_lock(&gr_inode_lock);
45896+ do {
45897+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
45898+ if (retval) {
45899+ if (checkglob && retval->globbed) {
45900+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
45901+ (struct vfsmount *)orig_mnt, path);
45902+ if (retval2)
45903+ retval = retval2;
45904+ }
45905+ break;
45906+ }
45907+ } while ((tmpsubj = tmpsubj->parent_subject));
45908+ read_unlock(&gr_inode_lock);
45909+
45910+ return retval;
45911+}
45912+
45913+static __inline__ struct acl_object_label *
45914+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
45915+ struct dentry *curr_dentry,
45916+ const struct acl_subject_label *subj, char **path, const int checkglob)
45917+{
45918+ int newglob = checkglob;
45919+ ino_t inode;
45920+ dev_t device;
45921+
45922+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
45923+ as we don't want a / * rule to match instead of the / object
45924+ don't do this for create lookups that call this function though, since they're looking up
45925+ on the parent and thus need globbing checks on all paths
45926+ */
45927+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
45928+ newglob = GR_NO_GLOB;
45929+
45930+ spin_lock(&curr_dentry->d_lock);
45931+ inode = curr_dentry->d_inode->i_ino;
45932+ device = __get_dev(curr_dentry);
45933+ spin_unlock(&curr_dentry->d_lock);
45934+
45935+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
45936+}
45937+
45938+static struct acl_object_label *
45939+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45940+ const struct acl_subject_label *subj, char *path, const int checkglob)
45941+{
45942+ struct dentry *dentry = (struct dentry *) l_dentry;
45943+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
45944+ struct acl_object_label *retval;
45945+ struct dentry *parent;
45946+
45947+ write_seqlock(&rename_lock);
45948+ br_read_lock(vfsmount_lock);
45949+
45950+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
45951+#ifdef CONFIG_NET
45952+ mnt == sock_mnt ||
45953+#endif
45954+#ifdef CONFIG_HUGETLBFS
45955+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
45956+#endif
45957+ /* ignore Eric Biederman */
45958+ IS_PRIVATE(l_dentry->d_inode))) {
45959+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
45960+ goto out;
45961+ }
45962+
45963+ for (;;) {
45964+ if (dentry == real_root.dentry && mnt == real_root.mnt)
45965+ break;
45966+
45967+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
45968+ if (mnt->mnt_parent == mnt)
45969+ break;
45970+
45971+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
45972+ if (retval != NULL)
45973+ goto out;
45974+
45975+ dentry = mnt->mnt_mountpoint;
45976+ mnt = mnt->mnt_parent;
45977+ continue;
45978+ }
45979+
45980+ parent = dentry->d_parent;
45981+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
45982+ if (retval != NULL)
45983+ goto out;
45984+
45985+ dentry = parent;
45986+ }
45987+
45988+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
45989+
45990+ /* real_root is pinned so we don't have to hold a reference */
45991+ if (retval == NULL)
45992+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
45993+out:
45994+ br_read_unlock(vfsmount_lock);
45995+ write_sequnlock(&rename_lock);
45996+
45997+ BUG_ON(retval == NULL);
45998+
45999+ return retval;
46000+}
46001+
46002+static __inline__ struct acl_object_label *
46003+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46004+ const struct acl_subject_label *subj)
46005+{
46006+ char *path = NULL;
46007+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
46008+}
46009+
46010+static __inline__ struct acl_object_label *
46011+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46012+ const struct acl_subject_label *subj)
46013+{
46014+ char *path = NULL;
46015+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
46016+}
46017+
46018+static __inline__ struct acl_object_label *
46019+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46020+ const struct acl_subject_label *subj, char *path)
46021+{
46022+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
46023+}
46024+
46025+static struct acl_subject_label *
46026+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46027+ const struct acl_role_label *role)
46028+{
46029+ struct dentry *dentry = (struct dentry *) l_dentry;
46030+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
46031+ struct acl_subject_label *retval;
46032+ struct dentry *parent;
46033+
46034+ write_seqlock(&rename_lock);
46035+ br_read_lock(vfsmount_lock);
46036+
46037+ for (;;) {
46038+ if (dentry == real_root.dentry && mnt == real_root.mnt)
46039+ break;
46040+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
46041+ if (mnt->mnt_parent == mnt)
46042+ break;
46043+
46044+ spin_lock(&dentry->d_lock);
46045+ read_lock(&gr_inode_lock);
46046+ retval =
46047+ lookup_acl_subj_label(dentry->d_inode->i_ino,
46048+ __get_dev(dentry), role);
46049+ read_unlock(&gr_inode_lock);
46050+ spin_unlock(&dentry->d_lock);
46051+ if (retval != NULL)
46052+ goto out;
46053+
46054+ dentry = mnt->mnt_mountpoint;
46055+ mnt = mnt->mnt_parent;
46056+ continue;
46057+ }
46058+
46059+ spin_lock(&dentry->d_lock);
46060+ read_lock(&gr_inode_lock);
46061+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
46062+ __get_dev(dentry), role);
46063+ read_unlock(&gr_inode_lock);
46064+ parent = dentry->d_parent;
46065+ spin_unlock(&dentry->d_lock);
46066+
46067+ if (retval != NULL)
46068+ goto out;
46069+
46070+ dentry = parent;
46071+ }
46072+
46073+ spin_lock(&dentry->d_lock);
46074+ read_lock(&gr_inode_lock);
46075+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
46076+ __get_dev(dentry), role);
46077+ read_unlock(&gr_inode_lock);
46078+ spin_unlock(&dentry->d_lock);
46079+
46080+ if (unlikely(retval == NULL)) {
46081+ /* real_root is pinned, we don't need to hold a reference */
46082+ read_lock(&gr_inode_lock);
46083+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
46084+ __get_dev(real_root.dentry), role);
46085+ read_unlock(&gr_inode_lock);
46086+ }
46087+out:
46088+ br_read_unlock(vfsmount_lock);
46089+ write_sequnlock(&rename_lock);
46090+
46091+ BUG_ON(retval == NULL);
46092+
46093+ return retval;
46094+}
46095+
46096+static void
46097+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
46098+{
46099+ struct task_struct *task = current;
46100+ const struct cred *cred = current_cred();
46101+
46102+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
46103+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
46104+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
46105+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
46106+
46107+ return;
46108+}
46109+
46110+static void
46111+gr_log_learn_sysctl(const char *path, const __u32 mode)
46112+{
46113+ struct task_struct *task = current;
46114+ const struct cred *cred = current_cred();
46115+
46116+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
46117+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
46118+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
46119+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
46120+
46121+ return;
46122+}
46123+
46124+static void
46125+gr_log_learn_id_change(const char type, const unsigned int real,
46126+ const unsigned int effective, const unsigned int fs)
46127+{
46128+ struct task_struct *task = current;
46129+ const struct cred *cred = current_cred();
46130+
46131+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
46132+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
46133+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
46134+ type, real, effective, fs, &task->signal->saved_ip);
46135+
46136+ return;
46137+}
46138+
46139+__u32
46140+gr_check_link(const struct dentry * new_dentry,
46141+ const struct dentry * parent_dentry,
46142+ const struct vfsmount * parent_mnt,
46143+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
46144+{
46145+ struct acl_object_label *obj;
46146+ __u32 oldmode, newmode;
46147+ __u32 needmode;
46148+
46149+ if (unlikely(!(gr_status & GR_READY)))
46150+ return (GR_CREATE | GR_LINK);
46151+
46152+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
46153+ oldmode = obj->mode;
46154+
46155+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46156+ oldmode |= (GR_CREATE | GR_LINK);
46157+
46158+ needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
46159+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
46160+ needmode |= GR_SETID | GR_AUDIT_SETID;
46161+
46162+ newmode =
46163+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
46164+ oldmode | needmode);
46165+
46166+ needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
46167+ GR_SETID | GR_READ | GR_FIND | GR_DELETE |
46168+ GR_INHERIT | GR_AUDIT_INHERIT);
46169+
46170+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
46171+ goto bad;
46172+
46173+ if ((oldmode & needmode) != needmode)
46174+ goto bad;
46175+
46176+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
46177+ if ((newmode & needmode) != needmode)
46178+ goto bad;
46179+
46180+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
46181+ return newmode;
46182+bad:
46183+ needmode = oldmode;
46184+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
46185+ needmode |= GR_SETID;
46186+
46187+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
46188+ gr_log_learn(old_dentry, old_mnt, needmode);
46189+ return (GR_CREATE | GR_LINK);
46190+ } else if (newmode & GR_SUPPRESS)
46191+ return GR_SUPPRESS;
46192+ else
46193+ return 0;
46194+}
46195+
46196+__u32
46197+gr_search_file(const struct dentry * dentry, const __u32 mode,
46198+ const struct vfsmount * mnt)
46199+{
46200+ __u32 retval = mode;
46201+ struct acl_subject_label *curracl;
46202+ struct acl_object_label *currobj;
46203+
46204+ if (unlikely(!(gr_status & GR_READY)))
46205+ return (mode & ~GR_AUDITS);
46206+
46207+ curracl = current->acl;
46208+
46209+ currobj = chk_obj_label(dentry, mnt, curracl);
46210+ retval = currobj->mode & mode;
46211+
46212+ /* if we're opening a specified transfer file for writing
46213+ (e.g. /dev/initctl), then transfer our role to init
46214+ */
46215+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
46216+ current->role->roletype & GR_ROLE_PERSIST)) {
46217+ struct task_struct *task = init_pid_ns.child_reaper;
46218+
46219+ if (task->role != current->role) {
46220+ task->acl_sp_role = 0;
46221+ task->acl_role_id = current->acl_role_id;
46222+ task->role = current->role;
46223+ rcu_read_lock();
46224+ read_lock(&grsec_exec_file_lock);
46225+ gr_apply_subject_to_task(task);
46226+ read_unlock(&grsec_exec_file_lock);
46227+ rcu_read_unlock();
46228+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
46229+ }
46230+ }
46231+
46232+ if (unlikely
46233+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
46234+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
46235+ __u32 new_mode = mode;
46236+
46237+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
46238+
46239+ retval = new_mode;
46240+
46241+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
46242+ new_mode |= GR_INHERIT;
46243+
46244+ if (!(mode & GR_NOLEARN))
46245+ gr_log_learn(dentry, mnt, new_mode);
46246+ }
46247+
46248+ return retval;
46249+}
46250+
46251+__u32
46252+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
46253+ const struct vfsmount * mnt, const __u32 mode)
46254+{
46255+ struct name_entry *match;
46256+ struct acl_object_label *matchpo;
46257+ struct acl_subject_label *curracl;
46258+ char *path;
46259+ __u32 retval;
46260+
46261+ if (unlikely(!(gr_status & GR_READY)))
46262+ return (mode & ~GR_AUDITS);
46263+
46264+ preempt_disable();
46265+ path = gr_to_filename_rbac(new_dentry, mnt);
46266+ match = lookup_name_entry_create(path);
46267+
46268+ if (!match)
46269+ goto check_parent;
46270+
46271+ curracl = current->acl;
46272+
46273+ read_lock(&gr_inode_lock);
46274+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
46275+ read_unlock(&gr_inode_lock);
46276+
46277+ if (matchpo) {
46278+ if ((matchpo->mode & mode) !=
46279+ (mode & ~(GR_AUDITS | GR_SUPPRESS))
46280+ && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
46281+ __u32 new_mode = mode;
46282+
46283+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
46284+
46285+ gr_log_learn(new_dentry, mnt, new_mode);
46286+
46287+ preempt_enable();
46288+ return new_mode;
46289+ }
46290+ preempt_enable();
46291+ return (matchpo->mode & mode);
46292+ }
46293+
46294+ check_parent:
46295+ curracl = current->acl;
46296+
46297+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
46298+ retval = matchpo->mode & mode;
46299+
46300+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
46301+ && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
46302+ __u32 new_mode = mode;
46303+
46304+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
46305+
46306+ gr_log_learn(new_dentry, mnt, new_mode);
46307+ preempt_enable();
46308+ return new_mode;
46309+ }
46310+
46311+ preempt_enable();
46312+ return retval;
46313+}
46314+
46315+int
46316+gr_check_hidden_task(const struct task_struct *task)
46317+{
46318+ if (unlikely(!(gr_status & GR_READY)))
46319+ return 0;
46320+
46321+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
46322+ return 1;
46323+
46324+ return 0;
46325+}
46326+
46327+int
46328+gr_check_protected_task(const struct task_struct *task)
46329+{
46330+ if (unlikely(!(gr_status & GR_READY) || !task))
46331+ return 0;
46332+
46333+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
46334+ task->acl != current->acl)
46335+ return 1;
46336+
46337+ return 0;
46338+}
46339+
46340+int
46341+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
46342+{
46343+ struct task_struct *p;
46344+ int ret = 0;
46345+
46346+ if (unlikely(!(gr_status & GR_READY) || !pid))
46347+ return ret;
46348+
46349+ read_lock(&tasklist_lock);
46350+ do_each_pid_task(pid, type, p) {
46351+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
46352+ p->acl != current->acl) {
46353+ ret = 1;
46354+ goto out;
46355+ }
46356+ } while_each_pid_task(pid, type, p);
46357+out:
46358+ read_unlock(&tasklist_lock);
46359+
46360+ return ret;
46361+}
46362+
46363+void
46364+gr_copy_label(struct task_struct *tsk)
46365+{
46366+ tsk->signal->used_accept = 0;
46367+ tsk->acl_sp_role = 0;
46368+ tsk->acl_role_id = current->acl_role_id;
46369+ tsk->acl = current->acl;
46370+ tsk->role = current->role;
46371+ tsk->signal->curr_ip = current->signal->curr_ip;
46372+ tsk->signal->saved_ip = current->signal->saved_ip;
46373+ if (current->exec_file)
46374+ get_file(current->exec_file);
46375+ tsk->exec_file = current->exec_file;
46376+ tsk->is_writable = current->is_writable;
46377+ if (unlikely(current->signal->used_accept)) {
46378+ current->signal->curr_ip = 0;
46379+ current->signal->saved_ip = 0;
46380+ }
46381+
46382+ return;
46383+}
46384+
46385+static void
46386+gr_set_proc_res(struct task_struct *task)
46387+{
46388+ struct acl_subject_label *proc;
46389+ unsigned short i;
46390+
46391+ proc = task->acl;
46392+
46393+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
46394+ return;
46395+
46396+ for (i = 0; i < RLIM_NLIMITS; i++) {
46397+ if (!(proc->resmask & (1 << i)))
46398+ continue;
46399+
46400+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
46401+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
46402+ }
46403+
46404+ return;
46405+}
46406+
46407+extern int __gr_process_user_ban(struct user_struct *user);
46408+
46409+int
46410+gr_check_user_change(int real, int effective, int fs)
46411+{
46412+ unsigned int i;
46413+ __u16 num;
46414+ uid_t *uidlist;
46415+ int curuid;
46416+ int realok = 0;
46417+ int effectiveok = 0;
46418+ int fsok = 0;
46419+
46420+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
46421+ struct user_struct *user;
46422+
46423+ if (real == -1)
46424+ goto skipit;
46425+
46426+ user = find_user(real);
46427+ if (user == NULL)
46428+ goto skipit;
46429+
46430+ if (__gr_process_user_ban(user)) {
46431+ /* for find_user */
46432+ free_uid(user);
46433+ return 1;
46434+ }
46435+
46436+ /* for find_user */
46437+ free_uid(user);
46438+
46439+skipit:
46440+#endif
46441+
46442+ if (unlikely(!(gr_status & GR_READY)))
46443+ return 0;
46444+
46445+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46446+ gr_log_learn_id_change('u', real, effective, fs);
46447+
46448+ num = current->acl->user_trans_num;
46449+ uidlist = current->acl->user_transitions;
46450+
46451+ if (uidlist == NULL)
46452+ return 0;
46453+
46454+ if (real == -1)
46455+ realok = 1;
46456+ if (effective == -1)
46457+ effectiveok = 1;
46458+ if (fs == -1)
46459+ fsok = 1;
46460+
46461+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
46462+ for (i = 0; i < num; i++) {
46463+ curuid = (int)uidlist[i];
46464+ if (real == curuid)
46465+ realok = 1;
46466+ if (effective == curuid)
46467+ effectiveok = 1;
46468+ if (fs == curuid)
46469+ fsok = 1;
46470+ }
46471+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
46472+ for (i = 0; i < num; i++) {
46473+ curuid = (int)uidlist[i];
46474+ if (real == curuid)
46475+ break;
46476+ if (effective == curuid)
46477+ break;
46478+ if (fs == curuid)
46479+ break;
46480+ }
46481+ /* not in deny list */
46482+ if (i == num) {
46483+ realok = 1;
46484+ effectiveok = 1;
46485+ fsok = 1;
46486+ }
46487+ }
46488+
46489+ if (realok && effectiveok && fsok)
46490+ return 0;
46491+ else {
46492+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
46493+ return 1;
46494+ }
46495+}
46496+
46497+int
46498+gr_check_group_change(int real, int effective, int fs)
46499+{
46500+ unsigned int i;
46501+ __u16 num;
46502+ gid_t *gidlist;
46503+ int curgid;
46504+ int realok = 0;
46505+ int effectiveok = 0;
46506+ int fsok = 0;
46507+
46508+ if (unlikely(!(gr_status & GR_READY)))
46509+ return 0;
46510+
46511+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46512+ gr_log_learn_id_change('g', real, effective, fs);
46513+
46514+ num = current->acl->group_trans_num;
46515+ gidlist = current->acl->group_transitions;
46516+
46517+ if (gidlist == NULL)
46518+ return 0;
46519+
46520+ if (real == -1)
46521+ realok = 1;
46522+ if (effective == -1)
46523+ effectiveok = 1;
46524+ if (fs == -1)
46525+ fsok = 1;
46526+
46527+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
46528+ for (i = 0; i < num; i++) {
46529+ curgid = (int)gidlist[i];
46530+ if (real == curgid)
46531+ realok = 1;
46532+ if (effective == curgid)
46533+ effectiveok = 1;
46534+ if (fs == curgid)
46535+ fsok = 1;
46536+ }
46537+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
46538+ for (i = 0; i < num; i++) {
46539+ curgid = (int)gidlist[i];
46540+ if (real == curgid)
46541+ break;
46542+ if (effective == curgid)
46543+ break;
46544+ if (fs == curgid)
46545+ break;
46546+ }
46547+ /* not in deny list */
46548+ if (i == num) {
46549+ realok = 1;
46550+ effectiveok = 1;
46551+ fsok = 1;
46552+ }
46553+ }
46554+
46555+ if (realok && effectiveok && fsok)
46556+ return 0;
46557+ else {
46558+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
46559+ return 1;
46560+ }
46561+}
46562+
46563+void
46564+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
46565+{
46566+ struct acl_role_label *role = task->role;
46567+ struct acl_subject_label *subj = NULL;
46568+ struct acl_object_label *obj;
46569+ struct file *filp;
46570+
46571+ if (unlikely(!(gr_status & GR_READY)))
46572+ return;
46573+
46574+ filp = task->exec_file;
46575+
46576+ /* kernel process, we'll give them the kernel role */
46577+ if (unlikely(!filp)) {
46578+ task->role = kernel_role;
46579+ task->acl = kernel_role->root_label;
46580+ return;
46581+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
46582+ role = lookup_acl_role_label(task, uid, gid);
46583+
46584+ /* perform subject lookup in possibly new role
46585+ we can use this result below in the case where role == task->role
46586+ */
46587+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
46588+
46589+ /* if we changed uid/gid, but result in the same role
46590+ and are using inheritance, don't lose the inherited subject
46591+ if current subject is other than what normal lookup
46592+ would result in, we arrived via inheritance, don't
46593+ lose subject
46594+ */
46595+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
46596+ (subj == task->acl)))
46597+ task->acl = subj;
46598+
46599+ task->role = role;
46600+
46601+ task->is_writable = 0;
46602+
46603+ /* ignore additional mmap checks for processes that are writable
46604+ by the default ACL */
46605+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
46606+ if (unlikely(obj->mode & GR_WRITE))
46607+ task->is_writable = 1;
46608+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
46609+ if (unlikely(obj->mode & GR_WRITE))
46610+ task->is_writable = 1;
46611+
46612+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46613+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
46614+#endif
46615+
46616+ gr_set_proc_res(task);
46617+
46618+ return;
46619+}
46620+
46621+int
46622+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
46623+ const int unsafe_share)
46624+{
46625+ struct task_struct *task = current;
46626+ struct acl_subject_label *newacl;
46627+ struct acl_object_label *obj;
46628+ __u32 retmode;
46629+
46630+ if (unlikely(!(gr_status & GR_READY)))
46631+ return 0;
46632+
46633+ newacl = chk_subj_label(dentry, mnt, task->role);
46634+
46635+ task_lock(task);
46636+ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
46637+ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
46638+ !(task->role->roletype & GR_ROLE_GOD) &&
46639+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
46640+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
46641+ task_unlock(task);
46642+ if (unsafe_share)
46643+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
46644+ else
46645+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
46646+ return -EACCES;
46647+ }
46648+ task_unlock(task);
46649+
46650+ obj = chk_obj_label(dentry, mnt, task->acl);
46651+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
46652+
46653+ if (!(task->acl->mode & GR_INHERITLEARN) &&
46654+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
46655+ if (obj->nested)
46656+ task->acl = obj->nested;
46657+ else
46658+ task->acl = newacl;
46659+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
46660+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
46661+
46662+ task->is_writable = 0;
46663+
46664+ /* ignore additional mmap checks for processes that are writable
46665+ by the default ACL */
46666+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
46667+ if (unlikely(obj->mode & GR_WRITE))
46668+ task->is_writable = 1;
46669+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
46670+ if (unlikely(obj->mode & GR_WRITE))
46671+ task->is_writable = 1;
46672+
46673+ gr_set_proc_res(task);
46674+
46675+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46676+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
46677+#endif
46678+ return 0;
46679+}
46680+
46681+/* always called with valid inodev ptr */
46682+static void
46683+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
46684+{
46685+ struct acl_object_label *matchpo;
46686+ struct acl_subject_label *matchps;
46687+ struct acl_subject_label *subj;
46688+ struct acl_role_label *role;
46689+ unsigned int x;
46690+
46691+ FOR_EACH_ROLE_START(role)
46692+ FOR_EACH_SUBJECT_START(role, subj, x)
46693+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
46694+ matchpo->mode |= GR_DELETED;
46695+ FOR_EACH_SUBJECT_END(subj,x)
46696+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
46697+ if (subj->inode == ino && subj->device == dev)
46698+ subj->mode |= GR_DELETED;
46699+ FOR_EACH_NESTED_SUBJECT_END(subj)
46700+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
46701+ matchps->mode |= GR_DELETED;
46702+ FOR_EACH_ROLE_END(role)
46703+
46704+ inodev->nentry->deleted = 1;
46705+
46706+ return;
46707+}
46708+
46709+void
46710+gr_handle_delete(const ino_t ino, const dev_t dev)
46711+{
46712+ struct inodev_entry *inodev;
46713+
46714+ if (unlikely(!(gr_status & GR_READY)))
46715+ return;
46716+
46717+ write_lock(&gr_inode_lock);
46718+ inodev = lookup_inodev_entry(ino, dev);
46719+ if (inodev != NULL)
46720+ do_handle_delete(inodev, ino, dev);
46721+ write_unlock(&gr_inode_lock);
46722+
46723+ return;
46724+}
46725+
46726+static void
46727+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
46728+ const ino_t newinode, const dev_t newdevice,
46729+ struct acl_subject_label *subj)
46730+{
46731+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
46732+ struct acl_object_label *match;
46733+
46734+ match = subj->obj_hash[index];
46735+
46736+ while (match && (match->inode != oldinode ||
46737+ match->device != olddevice ||
46738+ !(match->mode & GR_DELETED)))
46739+ match = match->next;
46740+
46741+ if (match && (match->inode == oldinode)
46742+ && (match->device == olddevice)
46743+ && (match->mode & GR_DELETED)) {
46744+ if (match->prev == NULL) {
46745+ subj->obj_hash[index] = match->next;
46746+ if (match->next != NULL)
46747+ match->next->prev = NULL;
46748+ } else {
46749+ match->prev->next = match->next;
46750+ if (match->next != NULL)
46751+ match->next->prev = match->prev;
46752+ }
46753+ match->prev = NULL;
46754+ match->next = NULL;
46755+ match->inode = newinode;
46756+ match->device = newdevice;
46757+ match->mode &= ~GR_DELETED;
46758+
46759+ insert_acl_obj_label(match, subj);
46760+ }
46761+
46762+ return;
46763+}
46764+
46765+static void
46766+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
46767+ const ino_t newinode, const dev_t newdevice,
46768+ struct acl_role_label *role)
46769+{
46770+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
46771+ struct acl_subject_label *match;
46772+
46773+ match = role->subj_hash[index];
46774+
46775+ while (match && (match->inode != oldinode ||
46776+ match->device != olddevice ||
46777+ !(match->mode & GR_DELETED)))
46778+ match = match->next;
46779+
46780+ if (match && (match->inode == oldinode)
46781+ && (match->device == olddevice)
46782+ && (match->mode & GR_DELETED)) {
46783+ if (match->prev == NULL) {
46784+ role->subj_hash[index] = match->next;
46785+ if (match->next != NULL)
46786+ match->next->prev = NULL;
46787+ } else {
46788+ match->prev->next = match->next;
46789+ if (match->next != NULL)
46790+ match->next->prev = match->prev;
46791+ }
46792+ match->prev = NULL;
46793+ match->next = NULL;
46794+ match->inode = newinode;
46795+ match->device = newdevice;
46796+ match->mode &= ~GR_DELETED;
46797+
46798+ insert_acl_subj_label(match, role);
46799+ }
46800+
46801+ return;
46802+}
46803+
46804+static void
46805+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
46806+ const ino_t newinode, const dev_t newdevice)
46807+{
46808+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
46809+ struct inodev_entry *match;
46810+
46811+ match = inodev_set.i_hash[index];
46812+
46813+ while (match && (match->nentry->inode != oldinode ||
46814+ match->nentry->device != olddevice || !match->nentry->deleted))
46815+ match = match->next;
46816+
46817+ if (match && (match->nentry->inode == oldinode)
46818+ && (match->nentry->device == olddevice) &&
46819+ match->nentry->deleted) {
46820+ if (match->prev == NULL) {
46821+ inodev_set.i_hash[index] = match->next;
46822+ if (match->next != NULL)
46823+ match->next->prev = NULL;
46824+ } else {
46825+ match->prev->next = match->next;
46826+ if (match->next != NULL)
46827+ match->next->prev = match->prev;
46828+ }
46829+ match->prev = NULL;
46830+ match->next = NULL;
46831+ match->nentry->inode = newinode;
46832+ match->nentry->device = newdevice;
46833+ match->nentry->deleted = 0;
46834+
46835+ insert_inodev_entry(match);
46836+ }
46837+
46838+ return;
46839+}
46840+
46841+static void
46842+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
46843+ const struct vfsmount *mnt)
46844+{
46845+ struct acl_subject_label *subj;
46846+ struct acl_role_label *role;
46847+ unsigned int x;
46848+ ino_t ino = dentry->d_inode->i_ino;
46849+ dev_t dev = __get_dev(dentry);
46850+
46851+ FOR_EACH_ROLE_START(role)
46852+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
46853+
46854+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
46855+ if ((subj->inode == ino) && (subj->device == dev)) {
46856+ subj->inode = ino;
46857+ subj->device = dev;
46858+ }
46859+ FOR_EACH_NESTED_SUBJECT_END(subj)
46860+ FOR_EACH_SUBJECT_START(role, subj, x)
46861+ update_acl_obj_label(matchn->inode, matchn->device,
46862+ ino, dev, subj);
46863+ FOR_EACH_SUBJECT_END(subj,x)
46864+ FOR_EACH_ROLE_END(role)
46865+
46866+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
46867+
46868+ return;
46869+}
46870+
46871+void
46872+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
46873+{
46874+ struct name_entry *matchn;
46875+
46876+ if (unlikely(!(gr_status & GR_READY)))
46877+ return;
46878+
46879+ preempt_disable();
46880+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
46881+
46882+ if (unlikely((unsigned long)matchn)) {
46883+ write_lock(&gr_inode_lock);
46884+ do_handle_create(matchn, dentry, mnt);
46885+ write_unlock(&gr_inode_lock);
46886+ }
46887+ preempt_enable();
46888+
46889+ return;
46890+}
46891+
46892+void
46893+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
46894+ struct dentry *old_dentry,
46895+ struct dentry *new_dentry,
46896+ struct vfsmount *mnt, const __u8 replace)
46897+{
46898+ struct name_entry *matchn;
46899+ struct inodev_entry *inodev;
46900+ ino_t old_ino = old_dentry->d_inode->i_ino;
46901+ dev_t old_dev = __get_dev(old_dentry);
46902+
46903+ /* vfs_rename swaps the name and parent link for old_dentry and
46904+ new_dentry
46905+ at this point, old_dentry has the new name, parent link, and inode
46906+ for the renamed file
46907+ if a file is being replaced by a rename, new_dentry has the inode
46908+ and name for the replaced file
46909+ */
46910+
46911+ if (unlikely(!(gr_status & GR_READY)))
46912+ return;
46913+
46914+ preempt_disable();
46915+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
46916+
46917+ /* we wouldn't have to check d_inode if it weren't for
46918+ NFS silly-renaming
46919+ */
46920+
46921+ write_lock(&gr_inode_lock);
46922+ if (unlikely(replace && new_dentry->d_inode)) {
46923+ ino_t new_ino = new_dentry->d_inode->i_ino;
46924+ dev_t new_dev = __get_dev(new_dentry);
46925+
46926+ inodev = lookup_inodev_entry(new_ino, new_dev);
46927+ if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
46928+ do_handle_delete(inodev, new_ino, new_dev);
46929+ }
46930+
46931+ inodev = lookup_inodev_entry(old_ino, old_dev);
46932+ if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
46933+ do_handle_delete(inodev, old_ino, old_dev);
46934+
46935+ if (unlikely((unsigned long)matchn))
46936+ do_handle_create(matchn, old_dentry, mnt);
46937+
46938+ write_unlock(&gr_inode_lock);
46939+ preempt_enable();
46940+
46941+ return;
46942+}
46943+
46944+static int
46945+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
46946+ unsigned char **sum)
46947+{
46948+ struct acl_role_label *r;
46949+ struct role_allowed_ip *ipp;
46950+ struct role_transition *trans;
46951+ unsigned int i;
46952+ int found = 0;
46953+ u32 curr_ip = current->signal->curr_ip;
46954+
46955+ current->signal->saved_ip = curr_ip;
46956+
46957+ /* check transition table */
46958+
46959+ for (trans = current->role->transitions; trans; trans = trans->next) {
46960+ if (!strcmp(rolename, trans->rolename)) {
46961+ found = 1;
46962+ break;
46963+ }
46964+ }
46965+
46966+ if (!found)
46967+ return 0;
46968+
46969+ /* handle special roles that do not require authentication
46970+ and check ip */
46971+
46972+ FOR_EACH_ROLE_START(r)
46973+ if (!strcmp(rolename, r->rolename) &&
46974+ (r->roletype & GR_ROLE_SPECIAL)) {
46975+ found = 0;
46976+ if (r->allowed_ips != NULL) {
46977+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
46978+ if ((ntohl(curr_ip) & ipp->netmask) ==
46979+ (ntohl(ipp->addr) & ipp->netmask))
46980+ found = 1;
46981+ }
46982+ } else
46983+ found = 2;
46984+ if (!found)
46985+ return 0;
46986+
46987+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
46988+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
46989+ *salt = NULL;
46990+ *sum = NULL;
46991+ return 1;
46992+ }
46993+ }
46994+ FOR_EACH_ROLE_END(r)
46995+
46996+ for (i = 0; i < num_sprole_pws; i++) {
46997+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
46998+ *salt = acl_special_roles[i]->salt;
46999+ *sum = acl_special_roles[i]->sum;
47000+ return 1;
47001+ }
47002+ }
47003+
47004+ return 0;
47005+}
47006+
47007+static void
47008+assign_special_role(char *rolename)
47009+{
47010+ struct acl_object_label *obj;
47011+ struct acl_role_label *r;
47012+ struct acl_role_label *assigned = NULL;
47013+ struct task_struct *tsk;
47014+ struct file *filp;
47015+
47016+ FOR_EACH_ROLE_START(r)
47017+ if (!strcmp(rolename, r->rolename) &&
47018+ (r->roletype & GR_ROLE_SPECIAL)) {
47019+ assigned = r;
47020+ break;
47021+ }
47022+ FOR_EACH_ROLE_END(r)
47023+
47024+ if (!assigned)
47025+ return;
47026+
47027+ read_lock(&tasklist_lock);
47028+ read_lock(&grsec_exec_file_lock);
47029+
47030+ tsk = current->real_parent;
47031+ if (tsk == NULL)
47032+ goto out_unlock;
47033+
47034+ filp = tsk->exec_file;
47035+ if (filp == NULL)
47036+ goto out_unlock;
47037+
47038+ tsk->is_writable = 0;
47039+
47040+ tsk->acl_sp_role = 1;
47041+ tsk->acl_role_id = ++acl_sp_role_value;
47042+ tsk->role = assigned;
47043+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
47044+
47045+ /* ignore additional mmap checks for processes that are writable
47046+ by the default ACL */
47047+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47048+ if (unlikely(obj->mode & GR_WRITE))
47049+ tsk->is_writable = 1;
47050+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
47051+ if (unlikely(obj->mode & GR_WRITE))
47052+ tsk->is_writable = 1;
47053+
47054+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47055+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
47056+#endif
47057+
47058+out_unlock:
47059+ read_unlock(&grsec_exec_file_lock);
47060+ read_unlock(&tasklist_lock);
47061+ return;
47062+}
47063+
47064+int gr_check_secure_terminal(struct task_struct *task)
47065+{
47066+ struct task_struct *p, *p2, *p3;
47067+ struct files_struct *files;
47068+ struct fdtable *fdt;
47069+ struct file *our_file = NULL, *file;
47070+ int i;
47071+
47072+ if (task->signal->tty == NULL)
47073+ return 1;
47074+
47075+ files = get_files_struct(task);
47076+ if (files != NULL) {
47077+ rcu_read_lock();
47078+ fdt = files_fdtable(files);
47079+ for (i=0; i < fdt->max_fds; i++) {
47080+ file = fcheck_files(files, i);
47081+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
47082+ get_file(file);
47083+ our_file = file;
47084+ }
47085+ }
47086+ rcu_read_unlock();
47087+ put_files_struct(files);
47088+ }
47089+
47090+ if (our_file == NULL)
47091+ return 1;
47092+
47093+ read_lock(&tasklist_lock);
47094+ do_each_thread(p2, p) {
47095+ files = get_files_struct(p);
47096+ if (files == NULL ||
47097+ (p->signal && p->signal->tty == task->signal->tty)) {
47098+ if (files != NULL)
47099+ put_files_struct(files);
47100+ continue;
47101+ }
47102+ rcu_read_lock();
47103+ fdt = files_fdtable(files);
47104+ for (i=0; i < fdt->max_fds; i++) {
47105+ file = fcheck_files(files, i);
47106+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
47107+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
47108+ p3 = task;
47109+ while (p3->pid > 0) {
47110+ if (p3 == p)
47111+ break;
47112+ p3 = p3->real_parent;
47113+ }
47114+ if (p3 == p)
47115+ break;
47116+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
47117+ gr_handle_alertkill(p);
47118+ rcu_read_unlock();
47119+ put_files_struct(files);
47120+ read_unlock(&tasklist_lock);
47121+ fput(our_file);
47122+ return 0;
47123+ }
47124+ }
47125+ rcu_read_unlock();
47126+ put_files_struct(files);
47127+ } while_each_thread(p2, p);
47128+ read_unlock(&tasklist_lock);
47129+
47130+ fput(our_file);
47131+ return 1;
47132+}
47133+
47134+ssize_t
47135+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
47136+{
47137+ struct gr_arg_wrapper uwrap;
47138+ unsigned char *sprole_salt = NULL;
47139+ unsigned char *sprole_sum = NULL;
47140+ int error = sizeof (struct gr_arg_wrapper);
47141+ int error2 = 0;
47142+
47143+ mutex_lock(&gr_dev_mutex);
47144+
47145+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
47146+ error = -EPERM;
47147+ goto out;
47148+ }
47149+
47150+ if (count != sizeof (struct gr_arg_wrapper)) {
47151+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
47152+ error = -EINVAL;
47153+ goto out;
47154+ }
47155+
47156+
47157+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
47158+ gr_auth_expires = 0;
47159+ gr_auth_attempts = 0;
47160+ }
47161+
47162+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
47163+ error = -EFAULT;
47164+ goto out;
47165+ }
47166+
47167+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
47168+ error = -EINVAL;
47169+ goto out;
47170+ }
47171+
47172+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
47173+ error = -EFAULT;
47174+ goto out;
47175+ }
47176+
47177+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
47178+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
47179+ time_after(gr_auth_expires, get_seconds())) {
47180+ error = -EBUSY;
47181+ goto out;
47182+ }
47183+
47184+ /* if non-root trying to do anything other than use a special role,
47185+ do not attempt authentication, do not count towards authentication
47186+ locking
47187+ */
47188+
47189+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
47190+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
47191+ current_uid()) {
47192+ error = -EPERM;
47193+ goto out;
47194+ }
47195+
47196+ /* ensure pw and special role name are null terminated */
47197+
47198+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
47199+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
47200+
47201+ /* Okay.
47202+ * We have our enough of the argument structure..(we have yet
47203+ * to copy_from_user the tables themselves) . Copy the tables
47204+ * only if we need them, i.e. for loading operations. */
47205+
47206+ switch (gr_usermode->mode) {
47207+ case GR_STATUS:
47208+ if (gr_status & GR_READY) {
47209+ error = 1;
47210+ if (!gr_check_secure_terminal(current))
47211+ error = 3;
47212+ } else
47213+ error = 2;
47214+ goto out;
47215+ case GR_SHUTDOWN:
47216+ if ((gr_status & GR_READY)
47217+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
47218+ pax_open_kernel();
47219+ gr_status &= ~GR_READY;
47220+ pax_close_kernel();
47221+
47222+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
47223+ free_variables();
47224+ memset(gr_usermode, 0, sizeof (struct gr_arg));
47225+ memset(gr_system_salt, 0, GR_SALT_LEN);
47226+ memset(gr_system_sum, 0, GR_SHA_LEN);
47227+ } else if (gr_status & GR_READY) {
47228+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
47229+ error = -EPERM;
47230+ } else {
47231+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
47232+ error = -EAGAIN;
47233+ }
47234+ break;
47235+ case GR_ENABLE:
47236+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
47237+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
47238+ else {
47239+ if (gr_status & GR_READY)
47240+ error = -EAGAIN;
47241+ else
47242+ error = error2;
47243+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
47244+ }
47245+ break;
47246+ case GR_RELOAD:
47247+ if (!(gr_status & GR_READY)) {
47248+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
47249+ error = -EAGAIN;
47250+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
47251+ preempt_disable();
47252+
47253+ pax_open_kernel();
47254+ gr_status &= ~GR_READY;
47255+ pax_close_kernel();
47256+
47257+ free_variables();
47258+ if (!(error2 = gracl_init(gr_usermode))) {
47259+ preempt_enable();
47260+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
47261+ } else {
47262+ preempt_enable();
47263+ error = error2;
47264+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
47265+ }
47266+ } else {
47267+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
47268+ error = -EPERM;
47269+ }
47270+ break;
47271+ case GR_SEGVMOD:
47272+ if (unlikely(!(gr_status & GR_READY))) {
47273+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
47274+ error = -EAGAIN;
47275+ break;
47276+ }
47277+
47278+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
47279+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
47280+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
47281+ struct acl_subject_label *segvacl;
47282+ segvacl =
47283+ lookup_acl_subj_label(gr_usermode->segv_inode,
47284+ gr_usermode->segv_device,
47285+ current->role);
47286+ if (segvacl) {
47287+ segvacl->crashes = 0;
47288+ segvacl->expires = 0;
47289+ }
47290+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
47291+ gr_remove_uid(gr_usermode->segv_uid);
47292+ }
47293+ } else {
47294+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
47295+ error = -EPERM;
47296+ }
47297+ break;
47298+ case GR_SPROLE:
47299+ case GR_SPROLEPAM:
47300+ if (unlikely(!(gr_status & GR_READY))) {
47301+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
47302+ error = -EAGAIN;
47303+ break;
47304+ }
47305+
47306+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
47307+ current->role->expires = 0;
47308+ current->role->auth_attempts = 0;
47309+ }
47310+
47311+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
47312+ time_after(current->role->expires, get_seconds())) {
47313+ error = -EBUSY;
47314+ goto out;
47315+ }
47316+
47317+ if (lookup_special_role_auth
47318+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
47319+ && ((!sprole_salt && !sprole_sum)
47320+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
47321+ char *p = "";
47322+ assign_special_role(gr_usermode->sp_role);
47323+ read_lock(&tasklist_lock);
47324+ if (current->real_parent)
47325+ p = current->real_parent->role->rolename;
47326+ read_unlock(&tasklist_lock);
47327+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
47328+ p, acl_sp_role_value);
47329+ } else {
47330+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
47331+ error = -EPERM;
47332+ if(!(current->role->auth_attempts++))
47333+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
47334+
47335+ goto out;
47336+ }
47337+ break;
47338+ case GR_UNSPROLE:
47339+ if (unlikely(!(gr_status & GR_READY))) {
47340+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
47341+ error = -EAGAIN;
47342+ break;
47343+ }
47344+
47345+ if (current->role->roletype & GR_ROLE_SPECIAL) {
47346+ char *p = "";
47347+ int i = 0;
47348+
47349+ read_lock(&tasklist_lock);
47350+ if (current->real_parent) {
47351+ p = current->real_parent->role->rolename;
47352+ i = current->real_parent->acl_role_id;
47353+ }
47354+ read_unlock(&tasklist_lock);
47355+
47356+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
47357+ gr_set_acls(1);
47358+ } else {
47359+ error = -EPERM;
47360+ goto out;
47361+ }
47362+ break;
47363+ default:
47364+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
47365+ error = -EINVAL;
47366+ break;
47367+ }
47368+
47369+ if (error != -EPERM)
47370+ goto out;
47371+
47372+ if(!(gr_auth_attempts++))
47373+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
47374+
47375+ out:
47376+ mutex_unlock(&gr_dev_mutex);
47377+ return error;
47378+}
47379+
47380+/* must be called with
47381+ rcu_read_lock();
47382+ read_lock(&tasklist_lock);
47383+ read_lock(&grsec_exec_file_lock);
47384+*/
47385+int gr_apply_subject_to_task(struct task_struct *task)
47386+{
47387+ struct acl_object_label *obj;
47388+ char *tmpname;
47389+ struct acl_subject_label *tmpsubj;
47390+ struct file *filp;
47391+ struct name_entry *nmatch;
47392+
47393+ filp = task->exec_file;
47394+ if (filp == NULL)
47395+ return 0;
47396+
47397+ /* the following is to apply the correct subject
47398+ on binaries running when the RBAC system
47399+ is enabled, when the binaries have been
47400+ replaced or deleted since their execution
47401+ -----
47402+ when the RBAC system starts, the inode/dev
47403+ from exec_file will be one the RBAC system
47404+ is unaware of. It only knows the inode/dev
47405+ of the present file on disk, or the absence
47406+ of it.
47407+ */
47408+ preempt_disable();
47409+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
47410+
47411+ nmatch = lookup_name_entry(tmpname);
47412+ preempt_enable();
47413+ tmpsubj = NULL;
47414+ if (nmatch) {
47415+ if (nmatch->deleted)
47416+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
47417+ else
47418+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
47419+ if (tmpsubj != NULL)
47420+ task->acl = tmpsubj;
47421+ }
47422+ if (tmpsubj == NULL)
47423+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
47424+ task->role);
47425+ if (task->acl) {
47426+ task->is_writable = 0;
47427+ /* ignore additional mmap checks for processes that are writable
47428+ by the default ACL */
47429+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47430+ if (unlikely(obj->mode & GR_WRITE))
47431+ task->is_writable = 1;
47432+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
47433+ if (unlikely(obj->mode & GR_WRITE))
47434+ task->is_writable = 1;
47435+
47436+ gr_set_proc_res(task);
47437+
47438+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47439+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
47440+#endif
47441+ } else {
47442+ return 1;
47443+ }
47444+
47445+ return 0;
47446+}
47447+
47448+int
47449+gr_set_acls(const int type)
47450+{
47451+ struct task_struct *task, *task2;
47452+ struct acl_role_label *role = current->role;
47453+ __u16 acl_role_id = current->acl_role_id;
47454+ const struct cred *cred;
47455+ int ret;
47456+
47457+ rcu_read_lock();
47458+ read_lock(&tasklist_lock);
47459+ read_lock(&grsec_exec_file_lock);
47460+ do_each_thread(task2, task) {
47461+ /* check to see if we're called from the exit handler,
47462+ if so, only replace ACLs that have inherited the admin
47463+ ACL */
47464+
47465+ if (type && (task->role != role ||
47466+ task->acl_role_id != acl_role_id))
47467+ continue;
47468+
47469+ task->acl_role_id = 0;
47470+ task->acl_sp_role = 0;
47471+
47472+ if (task->exec_file) {
47473+ cred = __task_cred(task);
47474+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
47475+ ret = gr_apply_subject_to_task(task);
47476+ if (ret) {
47477+ read_unlock(&grsec_exec_file_lock);
47478+ read_unlock(&tasklist_lock);
47479+ rcu_read_unlock();
47480+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
47481+ return ret;
47482+ }
47483+ } else {
47484+ // it's a kernel process
47485+ task->role = kernel_role;
47486+ task->acl = kernel_role->root_label;
47487+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
47488+ task->acl->mode &= ~GR_PROCFIND;
47489+#endif
47490+ }
47491+ } while_each_thread(task2, task);
47492+ read_unlock(&grsec_exec_file_lock);
47493+ read_unlock(&tasklist_lock);
47494+ rcu_read_unlock();
47495+
47496+ return 0;
47497+}
47498+
47499+void
47500+gr_learn_resource(const struct task_struct *task,
47501+ const int res, const unsigned long wanted, const int gt)
47502+{
47503+ struct acl_subject_label *acl;
47504+ const struct cred *cred;
47505+
47506+ if (unlikely((gr_status & GR_READY) &&
47507+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
47508+ goto skip_reslog;
47509+
47510+#ifdef CONFIG_GRKERNSEC_RESLOG
47511+ gr_log_resource(task, res, wanted, gt);
47512+#endif
47513+ skip_reslog:
47514+
47515+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
47516+ return;
47517+
47518+ acl = task->acl;
47519+
47520+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
47521+ !(acl->resmask & (1 << (unsigned short) res))))
47522+ return;
47523+
47524+ if (wanted >= acl->res[res].rlim_cur) {
47525+ unsigned long res_add;
47526+
47527+ res_add = wanted;
47528+ switch (res) {
47529+ case RLIMIT_CPU:
47530+ res_add += GR_RLIM_CPU_BUMP;
47531+ break;
47532+ case RLIMIT_FSIZE:
47533+ res_add += GR_RLIM_FSIZE_BUMP;
47534+ break;
47535+ case RLIMIT_DATA:
47536+ res_add += GR_RLIM_DATA_BUMP;
47537+ break;
47538+ case RLIMIT_STACK:
47539+ res_add += GR_RLIM_STACK_BUMP;
47540+ break;
47541+ case RLIMIT_CORE:
47542+ res_add += GR_RLIM_CORE_BUMP;
47543+ break;
47544+ case RLIMIT_RSS:
47545+ res_add += GR_RLIM_RSS_BUMP;
47546+ break;
47547+ case RLIMIT_NPROC:
47548+ res_add += GR_RLIM_NPROC_BUMP;
47549+ break;
47550+ case RLIMIT_NOFILE:
47551+ res_add += GR_RLIM_NOFILE_BUMP;
47552+ break;
47553+ case RLIMIT_MEMLOCK:
47554+ res_add += GR_RLIM_MEMLOCK_BUMP;
47555+ break;
47556+ case RLIMIT_AS:
47557+ res_add += GR_RLIM_AS_BUMP;
47558+ break;
47559+ case RLIMIT_LOCKS:
47560+ res_add += GR_RLIM_LOCKS_BUMP;
47561+ break;
47562+ case RLIMIT_SIGPENDING:
47563+ res_add += GR_RLIM_SIGPENDING_BUMP;
47564+ break;
47565+ case RLIMIT_MSGQUEUE:
47566+ res_add += GR_RLIM_MSGQUEUE_BUMP;
47567+ break;
47568+ case RLIMIT_NICE:
47569+ res_add += GR_RLIM_NICE_BUMP;
47570+ break;
47571+ case RLIMIT_RTPRIO:
47572+ res_add += GR_RLIM_RTPRIO_BUMP;
47573+ break;
47574+ case RLIMIT_RTTIME:
47575+ res_add += GR_RLIM_RTTIME_BUMP;
47576+ break;
47577+ }
47578+
47579+ acl->res[res].rlim_cur = res_add;
47580+
47581+ if (wanted > acl->res[res].rlim_max)
47582+ acl->res[res].rlim_max = res_add;
47583+
47584+ /* only log the subject filename, since resource logging is supported for
47585+ single-subject learning only */
47586+ rcu_read_lock();
47587+ cred = __task_cred(task);
47588+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
47589+ task->role->roletype, cred->uid, cred->gid, acl->filename,
47590+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
47591+ "", (unsigned long) res, &task->signal->saved_ip);
47592+ rcu_read_unlock();
47593+ }
47594+
47595+ return;
47596+}
47597+
47598+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
47599+void
47600+pax_set_initial_flags(struct linux_binprm *bprm)
47601+{
47602+ struct task_struct *task = current;
47603+ struct acl_subject_label *proc;
47604+ unsigned long flags;
47605+
47606+ if (unlikely(!(gr_status & GR_READY)))
47607+ return;
47608+
47609+ flags = pax_get_flags(task);
47610+
47611+ proc = task->acl;
47612+
47613+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
47614+ flags &= ~MF_PAX_PAGEEXEC;
47615+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
47616+ flags &= ~MF_PAX_SEGMEXEC;
47617+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
47618+ flags &= ~MF_PAX_RANDMMAP;
47619+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
47620+ flags &= ~MF_PAX_EMUTRAMP;
47621+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
47622+ flags &= ~MF_PAX_MPROTECT;
47623+
47624+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
47625+ flags |= MF_PAX_PAGEEXEC;
47626+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
47627+ flags |= MF_PAX_SEGMEXEC;
47628+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
47629+ flags |= MF_PAX_RANDMMAP;
47630+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
47631+ flags |= MF_PAX_EMUTRAMP;
47632+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
47633+ flags |= MF_PAX_MPROTECT;
47634+
47635+ pax_set_flags(task, flags);
47636+
47637+ return;
47638+}
47639+#endif
47640+
47641+#ifdef CONFIG_SYSCTL
47642+/* Eric Biederman likes breaking userland ABI and every inode-based security
47643+ system to save 35kb of memory */
47644+
47645+/* we modify the passed in filename, but adjust it back before returning */
47646+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
47647+{
47648+ struct name_entry *nmatch;
47649+ char *p, *lastp = NULL;
47650+ struct acl_object_label *obj = NULL, *tmp;
47651+ struct acl_subject_label *tmpsubj;
47652+ char c = '\0';
47653+
47654+ read_lock(&gr_inode_lock);
47655+
47656+ p = name + len - 1;
47657+ do {
47658+ nmatch = lookup_name_entry(name);
47659+ if (lastp != NULL)
47660+ *lastp = c;
47661+
47662+ if (nmatch == NULL)
47663+ goto next_component;
47664+ tmpsubj = current->acl;
47665+ do {
47666+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
47667+ if (obj != NULL) {
47668+ tmp = obj->globbed;
47669+ while (tmp) {
47670+ if (!glob_match(tmp->filename, name)) {
47671+ obj = tmp;
47672+ goto found_obj;
47673+ }
47674+ tmp = tmp->next;
47675+ }
47676+ goto found_obj;
47677+ }
47678+ } while ((tmpsubj = tmpsubj->parent_subject));
47679+next_component:
47680+ /* end case */
47681+ if (p == name)
47682+ break;
47683+
47684+ while (*p != '/')
47685+ p--;
47686+ if (p == name)
47687+ lastp = p + 1;
47688+ else {
47689+ lastp = p;
47690+ p--;
47691+ }
47692+ c = *lastp;
47693+ *lastp = '\0';
47694+ } while (1);
47695+found_obj:
47696+ read_unlock(&gr_inode_lock);
47697+ /* obj returned will always be non-null */
47698+ return obj;
47699+}
47700+
47701+/* returns 0 when allowing, non-zero on error
47702+ op of 0 is used for readdir, so we don't log the names of hidden files
47703+*/
47704+__u32
47705+gr_handle_sysctl(const struct ctl_table *table, const int op)
47706+{
47707+ struct ctl_table *tmp;
47708+ const char *proc_sys = "/proc/sys";
47709+ char *path;
47710+ struct acl_object_label *obj;
47711+ unsigned short len = 0, pos = 0, depth = 0, i;
47712+ __u32 err = 0;
47713+ __u32 mode = 0;
47714+
47715+ if (unlikely(!(gr_status & GR_READY)))
47716+ return 0;
47717+
47718+ /* for now, ignore operations on non-sysctl entries if it's not a
47719+ readdir*/
47720+ if (table->child != NULL && op != 0)
47721+ return 0;
47722+
47723+ mode |= GR_FIND;
47724+ /* it's only a read if it's an entry, read on dirs is for readdir */
47725+ if (op & MAY_READ)
47726+ mode |= GR_READ;
47727+ if (op & MAY_WRITE)
47728+ mode |= GR_WRITE;
47729+
47730+ preempt_disable();
47731+
47732+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
47733+
47734+ /* it's only a read/write if it's an actual entry, not a dir
47735+ (which are opened for readdir)
47736+ */
47737+
47738+ /* convert the requested sysctl entry into a pathname */
47739+
47740+ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
47741+ len += strlen(tmp->procname);
47742+ len++;
47743+ depth++;
47744+ }
47745+
47746+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
47747+ /* deny */
47748+ goto out;
47749+ }
47750+
47751+ memset(path, 0, PAGE_SIZE);
47752+
47753+ memcpy(path, proc_sys, strlen(proc_sys));
47754+
47755+ pos += strlen(proc_sys);
47756+
47757+ for (; depth > 0; depth--) {
47758+ path[pos] = '/';
47759+ pos++;
47760+ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
47761+ if (depth == i) {
47762+ memcpy(path + pos, tmp->procname,
47763+ strlen(tmp->procname));
47764+ pos += strlen(tmp->procname);
47765+ }
47766+ i++;
47767+ }
47768+ }
47769+
47770+ obj = gr_lookup_by_name(path, pos);
47771+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
47772+
47773+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
47774+ ((err & mode) != mode))) {
47775+ __u32 new_mode = mode;
47776+
47777+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
47778+
47779+ err = 0;
47780+ gr_log_learn_sysctl(path, new_mode);
47781+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
47782+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
47783+ err = -ENOENT;
47784+ } else if (!(err & GR_FIND)) {
47785+ err = -ENOENT;
47786+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
47787+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
47788+ path, (mode & GR_READ) ? " reading" : "",
47789+ (mode & GR_WRITE) ? " writing" : "");
47790+ err = -EACCES;
47791+ } else if ((err & mode) != mode) {
47792+ err = -EACCES;
47793+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
47794+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
47795+ path, (mode & GR_READ) ? " reading" : "",
47796+ (mode & GR_WRITE) ? " writing" : "");
47797+ err = 0;
47798+ } else
47799+ err = 0;
47800+
47801+ out:
47802+ preempt_enable();
47803+
47804+ return err;
47805+}
47806+#endif
47807+
47808+int
47809+gr_handle_proc_ptrace(struct task_struct *task)
47810+{
47811+ struct file *filp;
47812+ struct task_struct *tmp = task;
47813+ struct task_struct *curtemp = current;
47814+ __u32 retmode;
47815+
47816+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
47817+ if (unlikely(!(gr_status & GR_READY)))
47818+ return 0;
47819+#endif
47820+
47821+ read_lock(&tasklist_lock);
47822+ read_lock(&grsec_exec_file_lock);
47823+ filp = task->exec_file;
47824+
47825+ while (tmp->pid > 0) {
47826+ if (tmp == curtemp)
47827+ break;
47828+ tmp = tmp->real_parent;
47829+ }
47830+
47831+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
47832+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
47833+ read_unlock(&grsec_exec_file_lock);
47834+ read_unlock(&tasklist_lock);
47835+ return 1;
47836+ }
47837+
47838+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
47839+ if (!(gr_status & GR_READY)) {
47840+ read_unlock(&grsec_exec_file_lock);
47841+ read_unlock(&tasklist_lock);
47842+ return 0;
47843+ }
47844+#endif
47845+
47846+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
47847+ read_unlock(&grsec_exec_file_lock);
47848+ read_unlock(&tasklist_lock);
47849+
47850+ if (retmode & GR_NOPTRACE)
47851+ return 1;
47852+
47853+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
47854+ && (current->acl != task->acl || (current->acl != current->role->root_label
47855+ && current->pid != task->pid)))
47856+ return 1;
47857+
47858+ return 0;
47859+}
47860+
47861+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
47862+{
47863+ if (unlikely(!(gr_status & GR_READY)))
47864+ return;
47865+
47866+ if (!(current->role->roletype & GR_ROLE_GOD))
47867+ return;
47868+
47869+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
47870+ p->role->rolename, gr_task_roletype_to_char(p),
47871+ p->acl->filename);
47872+}
47873+
47874+int
47875+gr_handle_ptrace(struct task_struct *task, const long request)
47876+{
47877+ struct task_struct *tmp = task;
47878+ struct task_struct *curtemp = current;
47879+ __u32 retmode;
47880+
47881+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
47882+ if (unlikely(!(gr_status & GR_READY)))
47883+ return 0;
47884+#endif
47885+
47886+ read_lock(&tasklist_lock);
47887+ while (tmp->pid > 0) {
47888+ if (tmp == curtemp)
47889+ break;
47890+ tmp = tmp->real_parent;
47891+ }
47892+
47893+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
47894+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
47895+ read_unlock(&tasklist_lock);
47896+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
47897+ return 1;
47898+ }
47899+ read_unlock(&tasklist_lock);
47900+
47901+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
47902+ if (!(gr_status & GR_READY))
47903+ return 0;
47904+#endif
47905+
47906+ read_lock(&grsec_exec_file_lock);
47907+ if (unlikely(!task->exec_file)) {
47908+ read_unlock(&grsec_exec_file_lock);
47909+ return 0;
47910+ }
47911+
47912+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
47913+ read_unlock(&grsec_exec_file_lock);
47914+
47915+ if (retmode & GR_NOPTRACE) {
47916+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
47917+ return 1;
47918+ }
47919+
47920+ if (retmode & GR_PTRACERD) {
47921+ switch (request) {
47922+ case PTRACE_POKETEXT:
47923+ case PTRACE_POKEDATA:
47924+ case PTRACE_POKEUSR:
47925+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
47926+ case PTRACE_SETREGS:
47927+ case PTRACE_SETFPREGS:
47928+#endif
47929+#ifdef CONFIG_X86
47930+ case PTRACE_SETFPXREGS:
47931+#endif
47932+#ifdef CONFIG_ALTIVEC
47933+ case PTRACE_SETVRREGS:
47934+#endif
47935+ return 1;
47936+ default:
47937+ return 0;
47938+ }
47939+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
47940+ !(current->role->roletype & GR_ROLE_GOD) &&
47941+ (current->acl != task->acl)) {
47942+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
47943+ return 1;
47944+ }
47945+
47946+ return 0;
47947+}
47948+
47949+static int is_writable_mmap(const struct file *filp)
47950+{
47951+ struct task_struct *task = current;
47952+ struct acl_object_label *obj, *obj2;
47953+
47954+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
47955+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
47956+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47957+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
47958+ task->role->root_label);
47959+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
47960+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
47961+ return 1;
47962+ }
47963+ }
47964+ return 0;
47965+}
47966+
47967+int
47968+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
47969+{
47970+ __u32 mode;
47971+
47972+ if (unlikely(!file || !(prot & PROT_EXEC)))
47973+ return 1;
47974+
47975+ if (is_writable_mmap(file))
47976+ return 0;
47977+
47978+ mode =
47979+ gr_search_file(file->f_path.dentry,
47980+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
47981+ file->f_path.mnt);
47982+
47983+ if (!gr_tpe_allow(file))
47984+ return 0;
47985+
47986+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
47987+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47988+ return 0;
47989+ } else if (unlikely(!(mode & GR_EXEC))) {
47990+ return 0;
47991+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
47992+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47993+ return 1;
47994+ }
47995+
47996+ return 1;
47997+}
47998+
47999+int
48000+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
48001+{
48002+ __u32 mode;
48003+
48004+ if (unlikely(!file || !(prot & PROT_EXEC)))
48005+ return 1;
48006+
48007+ if (is_writable_mmap(file))
48008+ return 0;
48009+
48010+ mode =
48011+ gr_search_file(file->f_path.dentry,
48012+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
48013+ file->f_path.mnt);
48014+
48015+ if (!gr_tpe_allow(file))
48016+ return 0;
48017+
48018+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
48019+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
48020+ return 0;
48021+ } else if (unlikely(!(mode & GR_EXEC))) {
48022+ return 0;
48023+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
48024+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
48025+ return 1;
48026+ }
48027+
48028+ return 1;
48029+}
48030+
48031+void
48032+gr_acl_handle_psacct(struct task_struct *task, const long code)
48033+{
48034+ unsigned long runtime;
48035+ unsigned long cputime;
48036+ unsigned int wday, cday;
48037+ __u8 whr, chr;
48038+ __u8 wmin, cmin;
48039+ __u8 wsec, csec;
48040+ struct timespec timeval;
48041+
48042+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
48043+ !(task->acl->mode & GR_PROCACCT)))
48044+ return;
48045+
48046+ do_posix_clock_monotonic_gettime(&timeval);
48047+ runtime = timeval.tv_sec - task->start_time.tv_sec;
48048+ wday = runtime / (3600 * 24);
48049+ runtime -= wday * (3600 * 24);
48050+ whr = runtime / 3600;
48051+ runtime -= whr * 3600;
48052+ wmin = runtime / 60;
48053+ runtime -= wmin * 60;
48054+ wsec = runtime;
48055+
48056+ cputime = (task->utime + task->stime) / HZ;
48057+ cday = cputime / (3600 * 24);
48058+ cputime -= cday * (3600 * 24);
48059+ chr = cputime / 3600;
48060+ cputime -= chr * 3600;
48061+ cmin = cputime / 60;
48062+ cputime -= cmin * 60;
48063+ csec = cputime;
48064+
48065+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
48066+
48067+ return;
48068+}
48069+
48070+void gr_set_kernel_label(struct task_struct *task)
48071+{
48072+ if (gr_status & GR_READY) {
48073+ task->role = kernel_role;
48074+ task->acl = kernel_role->root_label;
48075+ }
48076+ return;
48077+}
48078+
48079+#ifdef CONFIG_TASKSTATS
48080+int gr_is_taskstats_denied(int pid)
48081+{
48082+ struct task_struct *task;
48083+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48084+ const struct cred *cred;
48085+#endif
48086+ int ret = 0;
48087+
48088+ /* restrict taskstats viewing to un-chrooted root users
48089+ who have the 'view' subject flag if the RBAC system is enabled
48090+ */
48091+
48092+ rcu_read_lock();
48093+ read_lock(&tasklist_lock);
48094+ task = find_task_by_vpid(pid);
48095+ if (task) {
48096+#ifdef CONFIG_GRKERNSEC_CHROOT
48097+ if (proc_is_chrooted(task))
48098+ ret = -EACCES;
48099+#endif
48100+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48101+ cred = __task_cred(task);
48102+#ifdef CONFIG_GRKERNSEC_PROC_USER
48103+ if (cred->uid != 0)
48104+ ret = -EACCES;
48105+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48106+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
48107+ ret = -EACCES;
48108+#endif
48109+#endif
48110+ if (gr_status & GR_READY) {
48111+ if (!(task->acl->mode & GR_VIEW))
48112+ ret = -EACCES;
48113+ }
48114+ } else
48115+ ret = -ENOENT;
48116+
48117+ read_unlock(&tasklist_lock);
48118+ rcu_read_unlock();
48119+
48120+ return ret;
48121+}
48122+#endif
48123+
48124+/* AUXV entries are filled via a descendant of search_binary_handler
48125+ after we've already applied the subject for the target
48126+*/
48127+int gr_acl_enable_at_secure(void)
48128+{
48129+ if (unlikely(!(gr_status & GR_READY)))
48130+ return 0;
48131+
48132+ if (current->acl->mode & GR_ATSECURE)
48133+ return 1;
48134+
48135+ return 0;
48136+}
48137+
48138+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
48139+{
48140+ struct task_struct *task = current;
48141+ struct dentry *dentry = file->f_path.dentry;
48142+ struct vfsmount *mnt = file->f_path.mnt;
48143+ struct acl_object_label *obj, *tmp;
48144+ struct acl_subject_label *subj;
48145+ unsigned int bufsize;
48146+ int is_not_root;
48147+ char *path;
48148+ dev_t dev = __get_dev(dentry);
48149+
48150+ if (unlikely(!(gr_status & GR_READY)))
48151+ return 1;
48152+
48153+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
48154+ return 1;
48155+
48156+ /* ignore Eric Biederman */
48157+ if (IS_PRIVATE(dentry->d_inode))
48158+ return 1;
48159+
48160+ subj = task->acl;
48161+ do {
48162+ obj = lookup_acl_obj_label(ino, dev, subj);
48163+ if (obj != NULL)
48164+ return (obj->mode & GR_FIND) ? 1 : 0;
48165+ } while ((subj = subj->parent_subject));
48166+
48167+ /* this is purely an optimization since we're looking for an object
48168+ for the directory we're doing a readdir on
48169+ if it's possible for any globbed object to match the entry we're
48170+ filling into the directory, then the object we find here will be
48171+ an anchor point with attached globbed objects
48172+ */
48173+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
48174+ if (obj->globbed == NULL)
48175+ return (obj->mode & GR_FIND) ? 1 : 0;
48176+
48177+ is_not_root = ((obj->filename[0] == '/') &&
48178+ (obj->filename[1] == '\0')) ? 0 : 1;
48179+ bufsize = PAGE_SIZE - namelen - is_not_root;
48180+
48181+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
48182+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
48183+ return 1;
48184+
48185+ preempt_disable();
48186+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
48187+ bufsize);
48188+
48189+ bufsize = strlen(path);
48190+
48191+ /* if base is "/", don't append an additional slash */
48192+ if (is_not_root)
48193+ *(path + bufsize) = '/';
48194+ memcpy(path + bufsize + is_not_root, name, namelen);
48195+ *(path + bufsize + namelen + is_not_root) = '\0';
48196+
48197+ tmp = obj->globbed;
48198+ while (tmp) {
48199+ if (!glob_match(tmp->filename, path)) {
48200+ preempt_enable();
48201+ return (tmp->mode & GR_FIND) ? 1 : 0;
48202+ }
48203+ tmp = tmp->next;
48204+ }
48205+ preempt_enable();
48206+ return (obj->mode & GR_FIND) ? 1 : 0;
48207+}
48208+
48209+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
48210+EXPORT_SYMBOL(gr_acl_is_enabled);
48211+#endif
48212+EXPORT_SYMBOL(gr_learn_resource);
48213+EXPORT_SYMBOL(gr_set_kernel_label);
48214+#ifdef CONFIG_SECURITY
48215+EXPORT_SYMBOL(gr_check_user_change);
48216+EXPORT_SYMBOL(gr_check_group_change);
48217+#endif
48218+
48219diff -urNp linux-3.0.4/grsecurity/gracl_cap.c linux-3.0.4/grsecurity/gracl_cap.c
48220--- linux-3.0.4/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
48221+++ linux-3.0.4/grsecurity/gracl_cap.c 2011-09-14 09:21:24.000000000 -0400
48222@@ -0,0 +1,101 @@
48223+#include <linux/kernel.h>
48224+#include <linux/module.h>
48225+#include <linux/sched.h>
48226+#include <linux/gracl.h>
48227+#include <linux/grsecurity.h>
48228+#include <linux/grinternal.h>
48229+
48230+extern const char *captab_log[];
48231+extern int captab_log_entries;
48232+
48233+int
48234+gr_acl_is_capable(const int cap)
48235+{
48236+ struct task_struct *task = current;
48237+ const struct cred *cred = current_cred();
48238+ struct acl_subject_label *curracl;
48239+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
48240+ kernel_cap_t cap_audit = __cap_empty_set;
48241+
48242+ if (!gr_acl_is_enabled())
48243+ return 1;
48244+
48245+ curracl = task->acl;
48246+
48247+ cap_drop = curracl->cap_lower;
48248+ cap_mask = curracl->cap_mask;
48249+ cap_audit = curracl->cap_invert_audit;
48250+
48251+ while ((curracl = curracl->parent_subject)) {
48252+ /* if the cap isn't specified in the current computed mask but is specified in the
48253+ current level subject, and is lowered in the current level subject, then add
48254+ it to the set of dropped capabilities
48255+ otherwise, add the current level subject's mask to the current computed mask
48256+ */
48257+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
48258+ cap_raise(cap_mask, cap);
48259+ if (cap_raised(curracl->cap_lower, cap))
48260+ cap_raise(cap_drop, cap);
48261+ if (cap_raised(curracl->cap_invert_audit, cap))
48262+ cap_raise(cap_audit, cap);
48263+ }
48264+ }
48265+
48266+ if (!cap_raised(cap_drop, cap)) {
48267+ if (cap_raised(cap_audit, cap))
48268+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
48269+ return 1;
48270+ }
48271+
48272+ curracl = task->acl;
48273+
48274+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
48275+ && cap_raised(cred->cap_effective, cap)) {
48276+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
48277+ task->role->roletype, cred->uid,
48278+ cred->gid, task->exec_file ?
48279+ gr_to_filename(task->exec_file->f_path.dentry,
48280+ task->exec_file->f_path.mnt) : curracl->filename,
48281+ curracl->filename, 0UL,
48282+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
48283+ return 1;
48284+ }
48285+
48286+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
48287+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
48288+ return 0;
48289+}
48290+
48291+int
48292+gr_acl_is_capable_nolog(const int cap)
48293+{
48294+ struct acl_subject_label *curracl;
48295+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
48296+
48297+ if (!gr_acl_is_enabled())
48298+ return 1;
48299+
48300+ curracl = current->acl;
48301+
48302+ cap_drop = curracl->cap_lower;
48303+ cap_mask = curracl->cap_mask;
48304+
48305+ while ((curracl = curracl->parent_subject)) {
48306+ /* if the cap isn't specified in the current computed mask but is specified in the
48307+ current level subject, and is lowered in the current level subject, then add
48308+ it to the set of dropped capabilities
48309+ otherwise, add the current level subject's mask to the current computed mask
48310+ */
48311+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
48312+ cap_raise(cap_mask, cap);
48313+ if (cap_raised(curracl->cap_lower, cap))
48314+ cap_raise(cap_drop, cap);
48315+ }
48316+ }
48317+
48318+ if (!cap_raised(cap_drop, cap))
48319+ return 1;
48320+
48321+ return 0;
48322+}
48323+
48324diff -urNp linux-3.0.4/grsecurity/gracl_fs.c linux-3.0.4/grsecurity/gracl_fs.c
48325--- linux-3.0.4/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
48326+++ linux-3.0.4/grsecurity/gracl_fs.c 2011-08-23 21:48:14.000000000 -0400
48327@@ -0,0 +1,431 @@
48328+#include <linux/kernel.h>
48329+#include <linux/sched.h>
48330+#include <linux/types.h>
48331+#include <linux/fs.h>
48332+#include <linux/file.h>
48333+#include <linux/stat.h>
48334+#include <linux/grsecurity.h>
48335+#include <linux/grinternal.h>
48336+#include <linux/gracl.h>
48337+
48338+__u32
48339+gr_acl_handle_hidden_file(const struct dentry * dentry,
48340+ const struct vfsmount * mnt)
48341+{
48342+ __u32 mode;
48343+
48344+ if (unlikely(!dentry->d_inode))
48345+ return GR_FIND;
48346+
48347+ mode =
48348+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
48349+
48350+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
48351+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
48352+ return mode;
48353+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
48354+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
48355+ return 0;
48356+ } else if (unlikely(!(mode & GR_FIND)))
48357+ return 0;
48358+
48359+ return GR_FIND;
48360+}
48361+
48362+__u32
48363+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
48364+ const int fmode)
48365+{
48366+ __u32 reqmode = GR_FIND;
48367+ __u32 mode;
48368+
48369+ if (unlikely(!dentry->d_inode))
48370+ return reqmode;
48371+
48372+ if (unlikely(fmode & O_APPEND))
48373+ reqmode |= GR_APPEND;
48374+ else if (unlikely(fmode & FMODE_WRITE))
48375+ reqmode |= GR_WRITE;
48376+ if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
48377+ reqmode |= GR_READ;
48378+ if ((fmode & FMODE_GREXEC) && (fmode & __FMODE_EXEC))
48379+ reqmode &= ~GR_READ;
48380+ mode =
48381+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
48382+ mnt);
48383+
48384+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48385+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
48386+ reqmode & GR_READ ? " reading" : "",
48387+ reqmode & GR_WRITE ? " writing" : reqmode &
48388+ GR_APPEND ? " appending" : "");
48389+ return reqmode;
48390+ } else
48391+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48392+ {
48393+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
48394+ reqmode & GR_READ ? " reading" : "",
48395+ reqmode & GR_WRITE ? " writing" : reqmode &
48396+ GR_APPEND ? " appending" : "");
48397+ return 0;
48398+ } else if (unlikely((mode & reqmode) != reqmode))
48399+ return 0;
48400+
48401+ return reqmode;
48402+}
48403+
48404+__u32
48405+gr_acl_handle_creat(const struct dentry * dentry,
48406+ const struct dentry * p_dentry,
48407+ const struct vfsmount * p_mnt, const int fmode,
48408+ const int imode)
48409+{
48410+ __u32 reqmode = GR_WRITE | GR_CREATE;
48411+ __u32 mode;
48412+
48413+ if (unlikely(fmode & O_APPEND))
48414+ reqmode |= GR_APPEND;
48415+ if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
48416+ reqmode |= GR_READ;
48417+ if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
48418+ reqmode |= GR_SETID;
48419+
48420+ mode =
48421+ gr_check_create(dentry, p_dentry, p_mnt,
48422+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
48423+
48424+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48425+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
48426+ reqmode & GR_READ ? " reading" : "",
48427+ reqmode & GR_WRITE ? " writing" : reqmode &
48428+ GR_APPEND ? " appending" : "");
48429+ return reqmode;
48430+ } else
48431+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48432+ {
48433+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
48434+ reqmode & GR_READ ? " reading" : "",
48435+ reqmode & GR_WRITE ? " writing" : reqmode &
48436+ GR_APPEND ? " appending" : "");
48437+ return 0;
48438+ } else if (unlikely((mode & reqmode) != reqmode))
48439+ return 0;
48440+
48441+ return reqmode;
48442+}
48443+
48444+__u32
48445+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
48446+ const int fmode)
48447+{
48448+ __u32 mode, reqmode = GR_FIND;
48449+
48450+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
48451+ reqmode |= GR_EXEC;
48452+ if (fmode & S_IWOTH)
48453+ reqmode |= GR_WRITE;
48454+ if (fmode & S_IROTH)
48455+ reqmode |= GR_READ;
48456+
48457+ mode =
48458+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
48459+ mnt);
48460+
48461+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48462+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
48463+ reqmode & GR_READ ? " reading" : "",
48464+ reqmode & GR_WRITE ? " writing" : "",
48465+ reqmode & GR_EXEC ? " executing" : "");
48466+ return reqmode;
48467+ } else
48468+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48469+ {
48470+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
48471+ reqmode & GR_READ ? " reading" : "",
48472+ reqmode & GR_WRITE ? " writing" : "",
48473+ reqmode & GR_EXEC ? " executing" : "");
48474+ return 0;
48475+ } else if (unlikely((mode & reqmode) != reqmode))
48476+ return 0;
48477+
48478+ return reqmode;
48479+}
48480+
48481+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
48482+{
48483+ __u32 mode;
48484+
48485+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
48486+
48487+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
48488+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
48489+ return mode;
48490+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
48491+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
48492+ return 0;
48493+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
48494+ return 0;
48495+
48496+ return (reqmode);
48497+}
48498+
48499+__u32
48500+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
48501+{
48502+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
48503+}
48504+
48505+__u32
48506+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
48507+{
48508+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
48509+}
48510+
48511+__u32
48512+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
48513+{
48514+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
48515+}
48516+
48517+__u32
48518+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
48519+{
48520+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
48521+}
48522+
48523+__u32
48524+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
48525+ mode_t mode)
48526+{
48527+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
48528+ return 1;
48529+
48530+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
48531+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
48532+ GR_FCHMOD_ACL_MSG);
48533+ } else {
48534+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
48535+ }
48536+}
48537+
48538+__u32
48539+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
48540+ mode_t mode)
48541+{
48542+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
48543+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
48544+ GR_CHMOD_ACL_MSG);
48545+ } else {
48546+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
48547+ }
48548+}
48549+
48550+__u32
48551+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
48552+{
48553+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
48554+}
48555+
48556+__u32
48557+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
48558+{
48559+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
48560+}
48561+
48562+__u32
48563+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
48564+{
48565+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
48566+}
48567+
48568+__u32
48569+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
48570+{
48571+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
48572+ GR_UNIXCONNECT_ACL_MSG);
48573+}
48574+
48575+/* hardlinks require at minimum create permission,
48576+ any additional privilege required is based on the
48577+ privilege of the file being linked to
48578+*/
48579+__u32
48580+gr_acl_handle_link(const struct dentry * new_dentry,
48581+ const struct dentry * parent_dentry,
48582+ const struct vfsmount * parent_mnt,
48583+ const struct dentry * old_dentry,
48584+ const struct vfsmount * old_mnt, const char *to)
48585+{
48586+ __u32 mode;
48587+ __u32 needmode = GR_CREATE | GR_LINK;
48588+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
48589+
48590+ mode =
48591+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
48592+ old_mnt);
48593+
48594+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
48595+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
48596+ return mode;
48597+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
48598+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
48599+ return 0;
48600+ } else if (unlikely((mode & needmode) != needmode))
48601+ return 0;
48602+
48603+ return 1;
48604+}
48605+
48606+__u32
48607+gr_acl_handle_symlink(const struct dentry * new_dentry,
48608+ const struct dentry * parent_dentry,
48609+ const struct vfsmount * parent_mnt, const char *from)
48610+{
48611+ __u32 needmode = GR_WRITE | GR_CREATE;
48612+ __u32 mode;
48613+
48614+ mode =
48615+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
48616+ GR_CREATE | GR_AUDIT_CREATE |
48617+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
48618+
48619+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
48620+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
48621+ return mode;
48622+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
48623+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
48624+ return 0;
48625+ } else if (unlikely((mode & needmode) != needmode))
48626+ return 0;
48627+
48628+ return (GR_WRITE | GR_CREATE);
48629+}
48630+
48631+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
48632+{
48633+ __u32 mode;
48634+
48635+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
48636+
48637+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
48638+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
48639+ return mode;
48640+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
48641+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
48642+ return 0;
48643+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
48644+ return 0;
48645+
48646+ return (reqmode);
48647+}
48648+
48649+__u32
48650+gr_acl_handle_mknod(const struct dentry * new_dentry,
48651+ const struct dentry * parent_dentry,
48652+ const struct vfsmount * parent_mnt,
48653+ const int mode)
48654+{
48655+ __u32 reqmode = GR_WRITE | GR_CREATE;
48656+ if (unlikely(mode & (S_ISUID | S_ISGID)))
48657+ reqmode |= GR_SETID;
48658+
48659+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
48660+ reqmode, GR_MKNOD_ACL_MSG);
48661+}
48662+
48663+__u32
48664+gr_acl_handle_mkdir(const struct dentry *new_dentry,
48665+ const struct dentry *parent_dentry,
48666+ const struct vfsmount *parent_mnt)
48667+{
48668+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
48669+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
48670+}
48671+
48672+#define RENAME_CHECK_SUCCESS(old, new) \
48673+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
48674+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
48675+
48676+int
48677+gr_acl_handle_rename(struct dentry *new_dentry,
48678+ struct dentry *parent_dentry,
48679+ const struct vfsmount *parent_mnt,
48680+ struct dentry *old_dentry,
48681+ struct inode *old_parent_inode,
48682+ struct vfsmount *old_mnt, const char *newname)
48683+{
48684+ __u32 comp1, comp2;
48685+ int error = 0;
48686+
48687+ if (unlikely(!gr_acl_is_enabled()))
48688+ return 0;
48689+
48690+ if (!new_dentry->d_inode) {
48691+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
48692+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
48693+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
48694+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
48695+ GR_DELETE | GR_AUDIT_DELETE |
48696+ GR_AUDIT_READ | GR_AUDIT_WRITE |
48697+ GR_SUPPRESS, old_mnt);
48698+ } else {
48699+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
48700+ GR_CREATE | GR_DELETE |
48701+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
48702+ GR_AUDIT_READ | GR_AUDIT_WRITE |
48703+ GR_SUPPRESS, parent_mnt);
48704+ comp2 =
48705+ gr_search_file(old_dentry,
48706+ GR_READ | GR_WRITE | GR_AUDIT_READ |
48707+ GR_DELETE | GR_AUDIT_DELETE |
48708+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
48709+ }
48710+
48711+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
48712+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
48713+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
48714+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
48715+ && !(comp2 & GR_SUPPRESS)) {
48716+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
48717+ error = -EACCES;
48718+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
48719+ error = -EACCES;
48720+
48721+ return error;
48722+}
48723+
48724+void
48725+gr_acl_handle_exit(void)
48726+{
48727+ u16 id;
48728+ char *rolename;
48729+ struct file *exec_file;
48730+
48731+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
48732+ !(current->role->roletype & GR_ROLE_PERSIST))) {
48733+ id = current->acl_role_id;
48734+ rolename = current->role->rolename;
48735+ gr_set_acls(1);
48736+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
48737+ }
48738+
48739+ write_lock(&grsec_exec_file_lock);
48740+ exec_file = current->exec_file;
48741+ current->exec_file = NULL;
48742+ write_unlock(&grsec_exec_file_lock);
48743+
48744+ if (exec_file)
48745+ fput(exec_file);
48746+}
48747+
48748+int
48749+gr_acl_handle_procpidmem(const struct task_struct *task)
48750+{
48751+ if (unlikely(!gr_acl_is_enabled()))
48752+ return 0;
48753+
48754+ if (task != current && task->acl->mode & GR_PROTPROCFD)
48755+ return -EACCES;
48756+
48757+ return 0;
48758+}
48759diff -urNp linux-3.0.4/grsecurity/gracl_ip.c linux-3.0.4/grsecurity/gracl_ip.c
48760--- linux-3.0.4/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
48761+++ linux-3.0.4/grsecurity/gracl_ip.c 2011-08-23 21:48:14.000000000 -0400
48762@@ -0,0 +1,381 @@
48763+#include <linux/kernel.h>
48764+#include <asm/uaccess.h>
48765+#include <asm/errno.h>
48766+#include <net/sock.h>
48767+#include <linux/file.h>
48768+#include <linux/fs.h>
48769+#include <linux/net.h>
48770+#include <linux/in.h>
48771+#include <linux/skbuff.h>
48772+#include <linux/ip.h>
48773+#include <linux/udp.h>
48774+#include <linux/types.h>
48775+#include <linux/sched.h>
48776+#include <linux/netdevice.h>
48777+#include <linux/inetdevice.h>
48778+#include <linux/gracl.h>
48779+#include <linux/grsecurity.h>
48780+#include <linux/grinternal.h>
48781+
48782+#define GR_BIND 0x01
48783+#define GR_CONNECT 0x02
48784+#define GR_INVERT 0x04
48785+#define GR_BINDOVERRIDE 0x08
48786+#define GR_CONNECTOVERRIDE 0x10
48787+#define GR_SOCK_FAMILY 0x20
48788+
48789+static const char * gr_protocols[IPPROTO_MAX] = {
48790+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
48791+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
48792+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
48793+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
48794+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
48795+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
48796+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
48797+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
48798+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
48799+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
48800+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
48801+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
48802+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
48803+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
48804+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
48805+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
48806+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
48807+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
48808+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
48809+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
48810+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
48811+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
48812+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
48813+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
48814+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
48815+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
48816+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
48817+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
48818+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
48819+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
48820+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
48821+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
48822+ };
48823+
48824+static const char * gr_socktypes[SOCK_MAX] = {
48825+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
48826+ "unknown:7", "unknown:8", "unknown:9", "packet"
48827+ };
48828+
48829+static const char * gr_sockfamilies[AF_MAX+1] = {
48830+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
48831+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
48832+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
48833+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
48834+ };
48835+
48836+const char *
48837+gr_proto_to_name(unsigned char proto)
48838+{
48839+ return gr_protocols[proto];
48840+}
48841+
48842+const char *
48843+gr_socktype_to_name(unsigned char type)
48844+{
48845+ return gr_socktypes[type];
48846+}
48847+
48848+const char *
48849+gr_sockfamily_to_name(unsigned char family)
48850+{
48851+ return gr_sockfamilies[family];
48852+}
48853+
48854+int
48855+gr_search_socket(const int domain, const int type, const int protocol)
48856+{
48857+ struct acl_subject_label *curr;
48858+ const struct cred *cred = current_cred();
48859+
48860+ if (unlikely(!gr_acl_is_enabled()))
48861+ goto exit;
48862+
48863+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
48864+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
48865+ goto exit; // let the kernel handle it
48866+
48867+ curr = current->acl;
48868+
48869+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
48870+ /* the family is allowed, if this is PF_INET allow it only if
48871+ the extra sock type/protocol checks pass */
48872+ if (domain == PF_INET)
48873+ goto inet_check;
48874+ goto exit;
48875+ } else {
48876+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
48877+ __u32 fakeip = 0;
48878+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48879+ current->role->roletype, cred->uid,
48880+ cred->gid, current->exec_file ?
48881+ gr_to_filename(current->exec_file->f_path.dentry,
48882+ current->exec_file->f_path.mnt) :
48883+ curr->filename, curr->filename,
48884+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
48885+ &current->signal->saved_ip);
48886+ goto exit;
48887+ }
48888+ goto exit_fail;
48889+ }
48890+
48891+inet_check:
48892+ /* the rest of this checking is for IPv4 only */
48893+ if (!curr->ips)
48894+ goto exit;
48895+
48896+ if ((curr->ip_type & (1 << type)) &&
48897+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
48898+ goto exit;
48899+
48900+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
48901+ /* we don't place acls on raw sockets , and sometimes
48902+ dgram/ip sockets are opened for ioctl and not
48903+ bind/connect, so we'll fake a bind learn log */
48904+ if (type == SOCK_RAW || type == SOCK_PACKET) {
48905+ __u32 fakeip = 0;
48906+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48907+ current->role->roletype, cred->uid,
48908+ cred->gid, current->exec_file ?
48909+ gr_to_filename(current->exec_file->f_path.dentry,
48910+ current->exec_file->f_path.mnt) :
48911+ curr->filename, curr->filename,
48912+ &fakeip, 0, type,
48913+ protocol, GR_CONNECT, &current->signal->saved_ip);
48914+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
48915+ __u32 fakeip = 0;
48916+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48917+ current->role->roletype, cred->uid,
48918+ cred->gid, current->exec_file ?
48919+ gr_to_filename(current->exec_file->f_path.dentry,
48920+ current->exec_file->f_path.mnt) :
48921+ curr->filename, curr->filename,
48922+ &fakeip, 0, type,
48923+ protocol, GR_BIND, &current->signal->saved_ip);
48924+ }
48925+ /* we'll log when they use connect or bind */
48926+ goto exit;
48927+ }
48928+
48929+exit_fail:
48930+ if (domain == PF_INET)
48931+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
48932+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
48933+ else
48934+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
48935+ gr_socktype_to_name(type), protocol);
48936+
48937+ return 0;
48938+exit:
48939+ return 1;
48940+}
48941+
48942+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
48943+{
48944+ if ((ip->mode & mode) &&
48945+ (ip_port >= ip->low) &&
48946+ (ip_port <= ip->high) &&
48947+ ((ntohl(ip_addr) & our_netmask) ==
48948+ (ntohl(our_addr) & our_netmask))
48949+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
48950+ && (ip->type & (1 << type))) {
48951+ if (ip->mode & GR_INVERT)
48952+ return 2; // specifically denied
48953+ else
48954+ return 1; // allowed
48955+ }
48956+
48957+ return 0; // not specifically allowed, may continue parsing
48958+}
48959+
48960+static int
48961+gr_search_connectbind(const int full_mode, struct sock *sk,
48962+ struct sockaddr_in *addr, const int type)
48963+{
48964+ char iface[IFNAMSIZ] = {0};
48965+ struct acl_subject_label *curr;
48966+ struct acl_ip_label *ip;
48967+ struct inet_sock *isk;
48968+ struct net_device *dev;
48969+ struct in_device *idev;
48970+ unsigned long i;
48971+ int ret;
48972+ int mode = full_mode & (GR_BIND | GR_CONNECT);
48973+ __u32 ip_addr = 0;
48974+ __u32 our_addr;
48975+ __u32 our_netmask;
48976+ char *p;
48977+ __u16 ip_port = 0;
48978+ const struct cred *cred = current_cred();
48979+
48980+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
48981+ return 0;
48982+
48983+ curr = current->acl;
48984+ isk = inet_sk(sk);
48985+
48986+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
48987+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
48988+ addr->sin_addr.s_addr = curr->inaddr_any_override;
48989+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
48990+ struct sockaddr_in saddr;
48991+ int err;
48992+
48993+ saddr.sin_family = AF_INET;
48994+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
48995+ saddr.sin_port = isk->inet_sport;
48996+
48997+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
48998+ if (err)
48999+ return err;
49000+
49001+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
49002+ if (err)
49003+ return err;
49004+ }
49005+
49006+ if (!curr->ips)
49007+ return 0;
49008+
49009+ ip_addr = addr->sin_addr.s_addr;
49010+ ip_port = ntohs(addr->sin_port);
49011+
49012+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
49013+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
49014+ current->role->roletype, cred->uid,
49015+ cred->gid, current->exec_file ?
49016+ gr_to_filename(current->exec_file->f_path.dentry,
49017+ current->exec_file->f_path.mnt) :
49018+ curr->filename, curr->filename,
49019+ &ip_addr, ip_port, type,
49020+ sk->sk_protocol, mode, &current->signal->saved_ip);
49021+ return 0;
49022+ }
49023+
49024+ for (i = 0; i < curr->ip_num; i++) {
49025+ ip = *(curr->ips + i);
49026+ if (ip->iface != NULL) {
49027+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
49028+ p = strchr(iface, ':');
49029+ if (p != NULL)
49030+ *p = '\0';
49031+ dev = dev_get_by_name(sock_net(sk), iface);
49032+ if (dev == NULL)
49033+ continue;
49034+ idev = in_dev_get(dev);
49035+ if (idev == NULL) {
49036+ dev_put(dev);
49037+ continue;
49038+ }
49039+ rcu_read_lock();
49040+ for_ifa(idev) {
49041+ if (!strcmp(ip->iface, ifa->ifa_label)) {
49042+ our_addr = ifa->ifa_address;
49043+ our_netmask = 0xffffffff;
49044+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
49045+ if (ret == 1) {
49046+ rcu_read_unlock();
49047+ in_dev_put(idev);
49048+ dev_put(dev);
49049+ return 0;
49050+ } else if (ret == 2) {
49051+ rcu_read_unlock();
49052+ in_dev_put(idev);
49053+ dev_put(dev);
49054+ goto denied;
49055+ }
49056+ }
49057+ } endfor_ifa(idev);
49058+ rcu_read_unlock();
49059+ in_dev_put(idev);
49060+ dev_put(dev);
49061+ } else {
49062+ our_addr = ip->addr;
49063+ our_netmask = ip->netmask;
49064+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
49065+ if (ret == 1)
49066+ return 0;
49067+ else if (ret == 2)
49068+ goto denied;
49069+ }
49070+ }
49071+
49072+denied:
49073+ if (mode == GR_BIND)
49074+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
49075+ else if (mode == GR_CONNECT)
49076+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
49077+
49078+ return -EACCES;
49079+}
49080+
49081+int
49082+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
49083+{
49084+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
49085+}
49086+
49087+int
49088+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
49089+{
49090+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
49091+}
49092+
49093+int gr_search_listen(struct socket *sock)
49094+{
49095+ struct sock *sk = sock->sk;
49096+ struct sockaddr_in addr;
49097+
49098+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
49099+ addr.sin_port = inet_sk(sk)->inet_sport;
49100+
49101+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
49102+}
49103+
49104+int gr_search_accept(struct socket *sock)
49105+{
49106+ struct sock *sk = sock->sk;
49107+ struct sockaddr_in addr;
49108+
49109+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
49110+ addr.sin_port = inet_sk(sk)->inet_sport;
49111+
49112+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
49113+}
49114+
49115+int
49116+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
49117+{
49118+ if (addr)
49119+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
49120+ else {
49121+ struct sockaddr_in sin;
49122+ const struct inet_sock *inet = inet_sk(sk);
49123+
49124+ sin.sin_addr.s_addr = inet->inet_daddr;
49125+ sin.sin_port = inet->inet_dport;
49126+
49127+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
49128+ }
49129+}
49130+
49131+int
49132+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
49133+{
49134+ struct sockaddr_in sin;
49135+
49136+ if (unlikely(skb->len < sizeof (struct udphdr)))
49137+ return 0; // skip this packet
49138+
49139+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
49140+ sin.sin_port = udp_hdr(skb)->source;
49141+
49142+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
49143+}
49144diff -urNp linux-3.0.4/grsecurity/gracl_learn.c linux-3.0.4/grsecurity/gracl_learn.c
49145--- linux-3.0.4/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
49146+++ linux-3.0.4/grsecurity/gracl_learn.c 2011-08-23 21:48:14.000000000 -0400
49147@@ -0,0 +1,207 @@
49148+#include <linux/kernel.h>
49149+#include <linux/mm.h>
49150+#include <linux/sched.h>
49151+#include <linux/poll.h>
49152+#include <linux/string.h>
49153+#include <linux/file.h>
49154+#include <linux/types.h>
49155+#include <linux/vmalloc.h>
49156+#include <linux/grinternal.h>
49157+
49158+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
49159+ size_t count, loff_t *ppos);
49160+extern int gr_acl_is_enabled(void);
49161+
49162+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
49163+static int gr_learn_attached;
49164+
49165+/* use a 512k buffer */
49166+#define LEARN_BUFFER_SIZE (512 * 1024)
49167+
49168+static DEFINE_SPINLOCK(gr_learn_lock);
49169+static DEFINE_MUTEX(gr_learn_user_mutex);
49170+
49171+/* we need to maintain two buffers, so that the kernel context of grlearn
49172+ uses a semaphore around the userspace copying, and the other kernel contexts
49173+ use a spinlock when copying into the buffer, since they cannot sleep
49174+*/
49175+static char *learn_buffer;
49176+static char *learn_buffer_user;
49177+static int learn_buffer_len;
49178+static int learn_buffer_user_len;
49179+
49180+static ssize_t
49181+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
49182+{
49183+ DECLARE_WAITQUEUE(wait, current);
49184+ ssize_t retval = 0;
49185+
49186+ add_wait_queue(&learn_wait, &wait);
49187+ set_current_state(TASK_INTERRUPTIBLE);
49188+ do {
49189+ mutex_lock(&gr_learn_user_mutex);
49190+ spin_lock(&gr_learn_lock);
49191+ if (learn_buffer_len)
49192+ break;
49193+ spin_unlock(&gr_learn_lock);
49194+ mutex_unlock(&gr_learn_user_mutex);
49195+ if (file->f_flags & O_NONBLOCK) {
49196+ retval = -EAGAIN;
49197+ goto out;
49198+ }
49199+ if (signal_pending(current)) {
49200+ retval = -ERESTARTSYS;
49201+ goto out;
49202+ }
49203+
49204+ schedule();
49205+ } while (1);
49206+
49207+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
49208+ learn_buffer_user_len = learn_buffer_len;
49209+ retval = learn_buffer_len;
49210+ learn_buffer_len = 0;
49211+
49212+ spin_unlock(&gr_learn_lock);
49213+
49214+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
49215+ retval = -EFAULT;
49216+
49217+ mutex_unlock(&gr_learn_user_mutex);
49218+out:
49219+ set_current_state(TASK_RUNNING);
49220+ remove_wait_queue(&learn_wait, &wait);
49221+ return retval;
49222+}
49223+
49224+static unsigned int
49225+poll_learn(struct file * file, poll_table * wait)
49226+{
49227+ poll_wait(file, &learn_wait, wait);
49228+
49229+ if (learn_buffer_len)
49230+ return (POLLIN | POLLRDNORM);
49231+
49232+ return 0;
49233+}
49234+
49235+void
49236+gr_clear_learn_entries(void)
49237+{
49238+ char *tmp;
49239+
49240+ mutex_lock(&gr_learn_user_mutex);
49241+ spin_lock(&gr_learn_lock);
49242+ tmp = learn_buffer;
49243+ learn_buffer = NULL;
49244+ spin_unlock(&gr_learn_lock);
49245+ if (tmp)
49246+ vfree(tmp);
49247+ if (learn_buffer_user != NULL) {
49248+ vfree(learn_buffer_user);
49249+ learn_buffer_user = NULL;
49250+ }
49251+ learn_buffer_len = 0;
49252+ mutex_unlock(&gr_learn_user_mutex);
49253+
49254+ return;
49255+}
49256+
49257+void
49258+gr_add_learn_entry(const char *fmt, ...)
49259+{
49260+ va_list args;
49261+ unsigned int len;
49262+
49263+ if (!gr_learn_attached)
49264+ return;
49265+
49266+ spin_lock(&gr_learn_lock);
49267+
49268+ /* leave a gap at the end so we know when it's "full" but don't have to
49269+ compute the exact length of the string we're trying to append
49270+ */
49271+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
49272+ spin_unlock(&gr_learn_lock);
49273+ wake_up_interruptible(&learn_wait);
49274+ return;
49275+ }
49276+ if (learn_buffer == NULL) {
49277+ spin_unlock(&gr_learn_lock);
49278+ return;
49279+ }
49280+
49281+ va_start(args, fmt);
49282+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
49283+ va_end(args);
49284+
49285+ learn_buffer_len += len + 1;
49286+
49287+ spin_unlock(&gr_learn_lock);
49288+ wake_up_interruptible(&learn_wait);
49289+
49290+ return;
49291+}
49292+
49293+static int
49294+open_learn(struct inode *inode, struct file *file)
49295+{
49296+ if (file->f_mode & FMODE_READ && gr_learn_attached)
49297+ return -EBUSY;
49298+ if (file->f_mode & FMODE_READ) {
49299+ int retval = 0;
49300+ mutex_lock(&gr_learn_user_mutex);
49301+ if (learn_buffer == NULL)
49302+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
49303+ if (learn_buffer_user == NULL)
49304+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
49305+ if (learn_buffer == NULL) {
49306+ retval = -ENOMEM;
49307+ goto out_error;
49308+ }
49309+ if (learn_buffer_user == NULL) {
49310+ retval = -ENOMEM;
49311+ goto out_error;
49312+ }
49313+ learn_buffer_len = 0;
49314+ learn_buffer_user_len = 0;
49315+ gr_learn_attached = 1;
49316+out_error:
49317+ mutex_unlock(&gr_learn_user_mutex);
49318+ return retval;
49319+ }
49320+ return 0;
49321+}
49322+
49323+static int
49324+close_learn(struct inode *inode, struct file *file)
49325+{
49326+ if (file->f_mode & FMODE_READ) {
49327+ char *tmp = NULL;
49328+ mutex_lock(&gr_learn_user_mutex);
49329+ spin_lock(&gr_learn_lock);
49330+ tmp = learn_buffer;
49331+ learn_buffer = NULL;
49332+ spin_unlock(&gr_learn_lock);
49333+ if (tmp)
49334+ vfree(tmp);
49335+ if (learn_buffer_user != NULL) {
49336+ vfree(learn_buffer_user);
49337+ learn_buffer_user = NULL;
49338+ }
49339+ learn_buffer_len = 0;
49340+ learn_buffer_user_len = 0;
49341+ gr_learn_attached = 0;
49342+ mutex_unlock(&gr_learn_user_mutex);
49343+ }
49344+
49345+ return 0;
49346+}
49347+
49348+const struct file_operations grsec_fops = {
49349+ .read = read_learn,
49350+ .write = write_grsec_handler,
49351+ .open = open_learn,
49352+ .release = close_learn,
49353+ .poll = poll_learn,
49354+};
49355diff -urNp linux-3.0.4/grsecurity/gracl_res.c linux-3.0.4/grsecurity/gracl_res.c
49356--- linux-3.0.4/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
49357+++ linux-3.0.4/grsecurity/gracl_res.c 2011-08-23 21:48:14.000000000 -0400
49358@@ -0,0 +1,68 @@
49359+#include <linux/kernel.h>
49360+#include <linux/sched.h>
49361+#include <linux/gracl.h>
49362+#include <linux/grinternal.h>
49363+
49364+static const char *restab_log[] = {
49365+ [RLIMIT_CPU] = "RLIMIT_CPU",
49366+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
49367+ [RLIMIT_DATA] = "RLIMIT_DATA",
49368+ [RLIMIT_STACK] = "RLIMIT_STACK",
49369+ [RLIMIT_CORE] = "RLIMIT_CORE",
49370+ [RLIMIT_RSS] = "RLIMIT_RSS",
49371+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
49372+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
49373+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
49374+ [RLIMIT_AS] = "RLIMIT_AS",
49375+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
49376+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
49377+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
49378+ [RLIMIT_NICE] = "RLIMIT_NICE",
49379+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
49380+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
49381+ [GR_CRASH_RES] = "RLIMIT_CRASH"
49382+};
49383+
49384+void
49385+gr_log_resource(const struct task_struct *task,
49386+ const int res, const unsigned long wanted, const int gt)
49387+{
49388+ const struct cred *cred;
49389+ unsigned long rlim;
49390+
49391+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
49392+ return;
49393+
49394+ // not yet supported resource
49395+ if (unlikely(!restab_log[res]))
49396+ return;
49397+
49398+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
49399+ rlim = task_rlimit_max(task, res);
49400+ else
49401+ rlim = task_rlimit(task, res);
49402+
49403+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
49404+ return;
49405+
49406+ rcu_read_lock();
49407+ cred = __task_cred(task);
49408+
49409+ if (res == RLIMIT_NPROC &&
49410+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
49411+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
49412+ goto out_rcu_unlock;
49413+ else if (res == RLIMIT_MEMLOCK &&
49414+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
49415+ goto out_rcu_unlock;
49416+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
49417+ goto out_rcu_unlock;
49418+ rcu_read_unlock();
49419+
49420+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
49421+
49422+ return;
49423+out_rcu_unlock:
49424+ rcu_read_unlock();
49425+ return;
49426+}
49427diff -urNp linux-3.0.4/grsecurity/gracl_segv.c linux-3.0.4/grsecurity/gracl_segv.c
49428--- linux-3.0.4/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
49429+++ linux-3.0.4/grsecurity/gracl_segv.c 2011-08-23 21:48:14.000000000 -0400
49430@@ -0,0 +1,299 @@
49431+#include <linux/kernel.h>
49432+#include <linux/mm.h>
49433+#include <asm/uaccess.h>
49434+#include <asm/errno.h>
49435+#include <asm/mman.h>
49436+#include <net/sock.h>
49437+#include <linux/file.h>
49438+#include <linux/fs.h>
49439+#include <linux/net.h>
49440+#include <linux/in.h>
49441+#include <linux/slab.h>
49442+#include <linux/types.h>
49443+#include <linux/sched.h>
49444+#include <linux/timer.h>
49445+#include <linux/gracl.h>
49446+#include <linux/grsecurity.h>
49447+#include <linux/grinternal.h>
49448+
49449+static struct crash_uid *uid_set;
49450+static unsigned short uid_used;
49451+static DEFINE_SPINLOCK(gr_uid_lock);
49452+extern rwlock_t gr_inode_lock;
49453+extern struct acl_subject_label *
49454+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
49455+ struct acl_role_label *role);
49456+
49457+#ifdef CONFIG_BTRFS_FS
49458+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
49459+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
49460+#endif
49461+
49462+static inline dev_t __get_dev(const struct dentry *dentry)
49463+{
49464+#ifdef CONFIG_BTRFS_FS
49465+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
49466+ return get_btrfs_dev_from_inode(dentry->d_inode);
49467+ else
49468+#endif
49469+ return dentry->d_inode->i_sb->s_dev;
49470+}
49471+
49472+int
49473+gr_init_uidset(void)
49474+{
49475+ uid_set =
49476+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
49477+ uid_used = 0;
49478+
49479+ return uid_set ? 1 : 0;
49480+}
49481+
49482+void
49483+gr_free_uidset(void)
49484+{
49485+ if (uid_set)
49486+ kfree(uid_set);
49487+
49488+ return;
49489+}
49490+
49491+int
49492+gr_find_uid(const uid_t uid)
49493+{
49494+ struct crash_uid *tmp = uid_set;
49495+ uid_t buid;
49496+ int low = 0, high = uid_used - 1, mid;
49497+
49498+ while (high >= low) {
49499+ mid = (low + high) >> 1;
49500+ buid = tmp[mid].uid;
49501+ if (buid == uid)
49502+ return mid;
49503+ if (buid > uid)
49504+ high = mid - 1;
49505+ if (buid < uid)
49506+ low = mid + 1;
49507+ }
49508+
49509+ return -1;
49510+}
49511+
49512+static __inline__ void
49513+gr_insertsort(void)
49514+{
49515+ unsigned short i, j;
49516+ struct crash_uid index;
49517+
49518+ for (i = 1; i < uid_used; i++) {
49519+ index = uid_set[i];
49520+ j = i;
49521+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
49522+ uid_set[j] = uid_set[j - 1];
49523+ j--;
49524+ }
49525+ uid_set[j] = index;
49526+ }
49527+
49528+ return;
49529+}
49530+
49531+static __inline__ void
49532+gr_insert_uid(const uid_t uid, const unsigned long expires)
49533+{
49534+ int loc;
49535+
49536+ if (uid_used == GR_UIDTABLE_MAX)
49537+ return;
49538+
49539+ loc = gr_find_uid(uid);
49540+
49541+ if (loc >= 0) {
49542+ uid_set[loc].expires = expires;
49543+ return;
49544+ }
49545+
49546+ uid_set[uid_used].uid = uid;
49547+ uid_set[uid_used].expires = expires;
49548+ uid_used++;
49549+
49550+ gr_insertsort();
49551+
49552+ return;
49553+}
49554+
49555+void
49556+gr_remove_uid(const unsigned short loc)
49557+{
49558+ unsigned short i;
49559+
49560+ for (i = loc + 1; i < uid_used; i++)
49561+ uid_set[i - 1] = uid_set[i];
49562+
49563+ uid_used--;
49564+
49565+ return;
49566+}
49567+
49568+int
49569+gr_check_crash_uid(const uid_t uid)
49570+{
49571+ int loc;
49572+ int ret = 0;
49573+
49574+ if (unlikely(!gr_acl_is_enabled()))
49575+ return 0;
49576+
49577+ spin_lock(&gr_uid_lock);
49578+ loc = gr_find_uid(uid);
49579+
49580+ if (loc < 0)
49581+ goto out_unlock;
49582+
49583+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
49584+ gr_remove_uid(loc);
49585+ else
49586+ ret = 1;
49587+
49588+out_unlock:
49589+ spin_unlock(&gr_uid_lock);
49590+ return ret;
49591+}
49592+
49593+static __inline__ int
49594+proc_is_setxid(const struct cred *cred)
49595+{
49596+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
49597+ cred->uid != cred->fsuid)
49598+ return 1;
49599+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
49600+ cred->gid != cred->fsgid)
49601+ return 1;
49602+
49603+ return 0;
49604+}
49605+
49606+extern int gr_fake_force_sig(int sig, struct task_struct *t);
49607+
49608+void
49609+gr_handle_crash(struct task_struct *task, const int sig)
49610+{
49611+ struct acl_subject_label *curr;
49612+ struct acl_subject_label *curr2;
49613+ struct task_struct *tsk, *tsk2;
49614+ const struct cred *cred;
49615+ const struct cred *cred2;
49616+
49617+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
49618+ return;
49619+
49620+ if (unlikely(!gr_acl_is_enabled()))
49621+ return;
49622+
49623+ curr = task->acl;
49624+
49625+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
49626+ return;
49627+
49628+ if (time_before_eq(curr->expires, get_seconds())) {
49629+ curr->expires = 0;
49630+ curr->crashes = 0;
49631+ }
49632+
49633+ curr->crashes++;
49634+
49635+ if (!curr->expires)
49636+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
49637+
49638+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
49639+ time_after(curr->expires, get_seconds())) {
49640+ rcu_read_lock();
49641+ cred = __task_cred(task);
49642+ if (cred->uid && proc_is_setxid(cred)) {
49643+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
49644+ spin_lock(&gr_uid_lock);
49645+ gr_insert_uid(cred->uid, curr->expires);
49646+ spin_unlock(&gr_uid_lock);
49647+ curr->expires = 0;
49648+ curr->crashes = 0;
49649+ read_lock(&tasklist_lock);
49650+ do_each_thread(tsk2, tsk) {
49651+ cred2 = __task_cred(tsk);
49652+ if (tsk != task && cred2->uid == cred->uid)
49653+ gr_fake_force_sig(SIGKILL, tsk);
49654+ } while_each_thread(tsk2, tsk);
49655+ read_unlock(&tasklist_lock);
49656+ } else {
49657+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
49658+ read_lock(&tasklist_lock);
49659+ do_each_thread(tsk2, tsk) {
49660+ if (likely(tsk != task)) {
49661+ curr2 = tsk->acl;
49662+
49663+ if (curr2->device == curr->device &&
49664+ curr2->inode == curr->inode)
49665+ gr_fake_force_sig(SIGKILL, tsk);
49666+ }
49667+ } while_each_thread(tsk2, tsk);
49668+ read_unlock(&tasklist_lock);
49669+ }
49670+ rcu_read_unlock();
49671+ }
49672+
49673+ return;
49674+}
49675+
49676+int
49677+gr_check_crash_exec(const struct file *filp)
49678+{
49679+ struct acl_subject_label *curr;
49680+
49681+ if (unlikely(!gr_acl_is_enabled()))
49682+ return 0;
49683+
49684+ read_lock(&gr_inode_lock);
49685+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
49686+ __get_dev(filp->f_path.dentry),
49687+ current->role);
49688+ read_unlock(&gr_inode_lock);
49689+
49690+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
49691+ (!curr->crashes && !curr->expires))
49692+ return 0;
49693+
49694+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
49695+ time_after(curr->expires, get_seconds()))
49696+ return 1;
49697+ else if (time_before_eq(curr->expires, get_seconds())) {
49698+ curr->crashes = 0;
49699+ curr->expires = 0;
49700+ }
49701+
49702+ return 0;
49703+}
49704+
49705+void
49706+gr_handle_alertkill(struct task_struct *task)
49707+{
49708+ struct acl_subject_label *curracl;
49709+ __u32 curr_ip;
49710+ struct task_struct *p, *p2;
49711+
49712+ if (unlikely(!gr_acl_is_enabled()))
49713+ return;
49714+
49715+ curracl = task->acl;
49716+ curr_ip = task->signal->curr_ip;
49717+
49718+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
49719+ read_lock(&tasklist_lock);
49720+ do_each_thread(p2, p) {
49721+ if (p->signal->curr_ip == curr_ip)
49722+ gr_fake_force_sig(SIGKILL, p);
49723+ } while_each_thread(p2, p);
49724+ read_unlock(&tasklist_lock);
49725+ } else if (curracl->mode & GR_KILLPROC)
49726+ gr_fake_force_sig(SIGKILL, task);
49727+
49728+ return;
49729+}
49730diff -urNp linux-3.0.4/grsecurity/gracl_shm.c linux-3.0.4/grsecurity/gracl_shm.c
49731--- linux-3.0.4/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
49732+++ linux-3.0.4/grsecurity/gracl_shm.c 2011-08-23 21:48:14.000000000 -0400
49733@@ -0,0 +1,40 @@
49734+#include <linux/kernel.h>
49735+#include <linux/mm.h>
49736+#include <linux/sched.h>
49737+#include <linux/file.h>
49738+#include <linux/ipc.h>
49739+#include <linux/gracl.h>
49740+#include <linux/grsecurity.h>
49741+#include <linux/grinternal.h>
49742+
49743+int
49744+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
49745+ const time_t shm_createtime, const uid_t cuid, const int shmid)
49746+{
49747+ struct task_struct *task;
49748+
49749+ if (!gr_acl_is_enabled())
49750+ return 1;
49751+
49752+ rcu_read_lock();
49753+ read_lock(&tasklist_lock);
49754+
49755+ task = find_task_by_vpid(shm_cprid);
49756+
49757+ if (unlikely(!task))
49758+ task = find_task_by_vpid(shm_lapid);
49759+
49760+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
49761+ (task->pid == shm_lapid)) &&
49762+ (task->acl->mode & GR_PROTSHM) &&
49763+ (task->acl != current->acl))) {
49764+ read_unlock(&tasklist_lock);
49765+ rcu_read_unlock();
49766+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
49767+ return 0;
49768+ }
49769+ read_unlock(&tasklist_lock);
49770+ rcu_read_unlock();
49771+
49772+ return 1;
49773+}
49774diff -urNp linux-3.0.4/grsecurity/grsec_chdir.c linux-3.0.4/grsecurity/grsec_chdir.c
49775--- linux-3.0.4/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
49776+++ linux-3.0.4/grsecurity/grsec_chdir.c 2011-08-23 21:48:14.000000000 -0400
49777@@ -0,0 +1,19 @@
49778+#include <linux/kernel.h>
49779+#include <linux/sched.h>
49780+#include <linux/fs.h>
49781+#include <linux/file.h>
49782+#include <linux/grsecurity.h>
49783+#include <linux/grinternal.h>
49784+
49785+void
49786+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
49787+{
49788+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
49789+ if ((grsec_enable_chdir && grsec_enable_group &&
49790+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
49791+ !grsec_enable_group)) {
49792+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
49793+ }
49794+#endif
49795+ return;
49796+}
49797diff -urNp linux-3.0.4/grsecurity/grsec_chroot.c linux-3.0.4/grsecurity/grsec_chroot.c
49798--- linux-3.0.4/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
49799+++ linux-3.0.4/grsecurity/grsec_chroot.c 2011-09-15 06:47:48.000000000 -0400
49800@@ -0,0 +1,351 @@
49801+#include <linux/kernel.h>
49802+#include <linux/module.h>
49803+#include <linux/sched.h>
49804+#include <linux/file.h>
49805+#include <linux/fs.h>
49806+#include <linux/mount.h>
49807+#include <linux/types.h>
49808+#include <linux/pid_namespace.h>
49809+#include <linux/grsecurity.h>
49810+#include <linux/grinternal.h>
49811+
49812+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
49813+{
49814+#ifdef CONFIG_GRKERNSEC
49815+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
49816+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
49817+ task->gr_is_chrooted = 1;
49818+ else
49819+ task->gr_is_chrooted = 0;
49820+
49821+ task->gr_chroot_dentry = path->dentry;
49822+#endif
49823+ return;
49824+}
49825+
49826+void gr_clear_chroot_entries(struct task_struct *task)
49827+{
49828+#ifdef CONFIG_GRKERNSEC
49829+ task->gr_is_chrooted = 0;
49830+ task->gr_chroot_dentry = NULL;
49831+#endif
49832+ return;
49833+}
49834+
49835+int
49836+gr_handle_chroot_unix(const pid_t pid)
49837+{
49838+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
49839+ struct task_struct *p;
49840+
49841+ if (unlikely(!grsec_enable_chroot_unix))
49842+ return 1;
49843+
49844+ if (likely(!proc_is_chrooted(current)))
49845+ return 1;
49846+
49847+ rcu_read_lock();
49848+ read_lock(&tasklist_lock);
49849+ p = find_task_by_vpid_unrestricted(pid);
49850+ if (unlikely(p && !have_same_root(current, p))) {
49851+ read_unlock(&tasklist_lock);
49852+ rcu_read_unlock();
49853+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
49854+ return 0;
49855+ }
49856+ read_unlock(&tasklist_lock);
49857+ rcu_read_unlock();
49858+#endif
49859+ return 1;
49860+}
49861+
49862+int
49863+gr_handle_chroot_nice(void)
49864+{
49865+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49866+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
49867+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
49868+ return -EPERM;
49869+ }
49870+#endif
49871+ return 0;
49872+}
49873+
49874+int
49875+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
49876+{
49877+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49878+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
49879+ && proc_is_chrooted(current)) {
49880+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
49881+ return -EACCES;
49882+ }
49883+#endif
49884+ return 0;
49885+}
49886+
49887+int
49888+gr_handle_chroot_rawio(const struct inode *inode)
49889+{
49890+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49891+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
49892+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
49893+ return 1;
49894+#endif
49895+ return 0;
49896+}
49897+
49898+int
49899+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
49900+{
49901+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49902+ struct task_struct *p;
49903+ int ret = 0;
49904+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
49905+ return ret;
49906+
49907+ read_lock(&tasklist_lock);
49908+ do_each_pid_task(pid, type, p) {
49909+ if (!have_same_root(current, p)) {
49910+ ret = 1;
49911+ goto out;
49912+ }
49913+ } while_each_pid_task(pid, type, p);
49914+out:
49915+ read_unlock(&tasklist_lock);
49916+ return ret;
49917+#endif
49918+ return 0;
49919+}
49920+
49921+int
49922+gr_pid_is_chrooted(struct task_struct *p)
49923+{
49924+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49925+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
49926+ return 0;
49927+
49928+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
49929+ !have_same_root(current, p)) {
49930+ return 1;
49931+ }
49932+#endif
49933+ return 0;
49934+}
49935+
49936+EXPORT_SYMBOL(gr_pid_is_chrooted);
49937+
49938+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
49939+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
49940+{
49941+ struct path path, currentroot;
49942+ int ret = 0;
49943+
49944+ path.dentry = (struct dentry *)u_dentry;
49945+ path.mnt = (struct vfsmount *)u_mnt;
49946+ get_fs_root(current->fs, &currentroot);
49947+ if (path_is_under(&path, &currentroot))
49948+ ret = 1;
49949+ path_put(&currentroot);
49950+
49951+ return ret;
49952+}
49953+#endif
49954+
49955+int
49956+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
49957+{
49958+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
49959+ if (!grsec_enable_chroot_fchdir)
49960+ return 1;
49961+
49962+ if (!proc_is_chrooted(current))
49963+ return 1;
49964+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
49965+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
49966+ return 0;
49967+ }
49968+#endif
49969+ return 1;
49970+}
49971+
49972+int
49973+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
49974+ const time_t shm_createtime)
49975+{
49976+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
49977+ struct task_struct *p;
49978+ time_t starttime;
49979+
49980+ if (unlikely(!grsec_enable_chroot_shmat))
49981+ return 1;
49982+
49983+ if (likely(!proc_is_chrooted(current)))
49984+ return 1;
49985+
49986+ rcu_read_lock();
49987+ read_lock(&tasklist_lock);
49988+
49989+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
49990+ starttime = p->start_time.tv_sec;
49991+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
49992+ if (have_same_root(current, p)) {
49993+ goto allow;
49994+ } else {
49995+ read_unlock(&tasklist_lock);
49996+ rcu_read_unlock();
49997+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
49998+ return 0;
49999+ }
50000+ }
50001+ /* creator exited, pid reuse, fall through to next check */
50002+ }
50003+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
50004+ if (unlikely(!have_same_root(current, p))) {
50005+ read_unlock(&tasklist_lock);
50006+ rcu_read_unlock();
50007+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
50008+ return 0;
50009+ }
50010+ }
50011+
50012+allow:
50013+ read_unlock(&tasklist_lock);
50014+ rcu_read_unlock();
50015+#endif
50016+ return 1;
50017+}
50018+
50019+void
50020+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
50021+{
50022+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
50023+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
50024+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
50025+#endif
50026+ return;
50027+}
50028+
50029+int
50030+gr_handle_chroot_mknod(const struct dentry *dentry,
50031+ const struct vfsmount *mnt, const int mode)
50032+{
50033+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
50034+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
50035+ proc_is_chrooted(current)) {
50036+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
50037+ return -EPERM;
50038+ }
50039+#endif
50040+ return 0;
50041+}
50042+
50043+int
50044+gr_handle_chroot_mount(const struct dentry *dentry,
50045+ const struct vfsmount *mnt, const char *dev_name)
50046+{
50047+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
50048+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
50049+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
50050+ return -EPERM;
50051+ }
50052+#endif
50053+ return 0;
50054+}
50055+
50056+int
50057+gr_handle_chroot_pivot(void)
50058+{
50059+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
50060+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
50061+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
50062+ return -EPERM;
50063+ }
50064+#endif
50065+ return 0;
50066+}
50067+
50068+int
50069+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
50070+{
50071+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
50072+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
50073+ !gr_is_outside_chroot(dentry, mnt)) {
50074+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
50075+ return -EPERM;
50076+ }
50077+#endif
50078+ return 0;
50079+}
50080+
50081+extern const char *captab_log[];
50082+extern int captab_log_entries;
50083+
50084+int
50085+gr_chroot_is_capable(const int cap)
50086+{
50087+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50088+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
50089+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
50090+ if (cap_raised(chroot_caps, cap)) {
50091+ const struct cred *creds = current_cred();
50092+ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
50093+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
50094+ }
50095+ return 0;
50096+ }
50097+ }
50098+#endif
50099+ return 1;
50100+}
50101+
50102+int
50103+gr_chroot_is_capable_nolog(const int cap)
50104+{
50105+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50106+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
50107+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
50108+ if (cap_raised(chroot_caps, cap)) {
50109+ return 0;
50110+ }
50111+ }
50112+#endif
50113+ return 1;
50114+}
50115+
50116+int
50117+gr_handle_chroot_sysctl(const int op)
50118+{
50119+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
50120+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
50121+ proc_is_chrooted(current))
50122+ return -EACCES;
50123+#endif
50124+ return 0;
50125+}
50126+
50127+void
50128+gr_handle_chroot_chdir(struct path *path)
50129+{
50130+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
50131+ if (grsec_enable_chroot_chdir)
50132+ set_fs_pwd(current->fs, path);
50133+#endif
50134+ return;
50135+}
50136+
50137+int
50138+gr_handle_chroot_chmod(const struct dentry *dentry,
50139+ const struct vfsmount *mnt, const int mode)
50140+{
50141+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
50142+ /* allow chmod +s on directories, but not files */
50143+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
50144+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
50145+ proc_is_chrooted(current)) {
50146+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
50147+ return -EPERM;
50148+ }
50149+#endif
50150+ return 0;
50151+}
50152diff -urNp linux-3.0.4/grsecurity/grsec_disabled.c linux-3.0.4/grsecurity/grsec_disabled.c
50153--- linux-3.0.4/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
50154+++ linux-3.0.4/grsecurity/grsec_disabled.c 2011-08-23 21:48:14.000000000 -0400
50155@@ -0,0 +1,447 @@
50156+#include <linux/kernel.h>
50157+#include <linux/module.h>
50158+#include <linux/sched.h>
50159+#include <linux/file.h>
50160+#include <linux/fs.h>
50161+#include <linux/kdev_t.h>
50162+#include <linux/net.h>
50163+#include <linux/in.h>
50164+#include <linux/ip.h>
50165+#include <linux/skbuff.h>
50166+#include <linux/sysctl.h>
50167+
50168+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
50169+void
50170+pax_set_initial_flags(struct linux_binprm *bprm)
50171+{
50172+ return;
50173+}
50174+#endif
50175+
50176+#ifdef CONFIG_SYSCTL
50177+__u32
50178+gr_handle_sysctl(const struct ctl_table * table, const int op)
50179+{
50180+ return 0;
50181+}
50182+#endif
50183+
50184+#ifdef CONFIG_TASKSTATS
50185+int gr_is_taskstats_denied(int pid)
50186+{
50187+ return 0;
50188+}
50189+#endif
50190+
50191+int
50192+gr_acl_is_enabled(void)
50193+{
50194+ return 0;
50195+}
50196+
50197+int
50198+gr_handle_rawio(const struct inode *inode)
50199+{
50200+ return 0;
50201+}
50202+
50203+void
50204+gr_acl_handle_psacct(struct task_struct *task, const long code)
50205+{
50206+ return;
50207+}
50208+
50209+int
50210+gr_handle_ptrace(struct task_struct *task, const long request)
50211+{
50212+ return 0;
50213+}
50214+
50215+int
50216+gr_handle_proc_ptrace(struct task_struct *task)
50217+{
50218+ return 0;
50219+}
50220+
50221+void
50222+gr_learn_resource(const struct task_struct *task,
50223+ const int res, const unsigned long wanted, const int gt)
50224+{
50225+ return;
50226+}
50227+
50228+int
50229+gr_set_acls(const int type)
50230+{
50231+ return 0;
50232+}
50233+
50234+int
50235+gr_check_hidden_task(const struct task_struct *tsk)
50236+{
50237+ return 0;
50238+}
50239+
50240+int
50241+gr_check_protected_task(const struct task_struct *task)
50242+{
50243+ return 0;
50244+}
50245+
50246+int
50247+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
50248+{
50249+ return 0;
50250+}
50251+
50252+void
50253+gr_copy_label(struct task_struct *tsk)
50254+{
50255+ return;
50256+}
50257+
50258+void
50259+gr_set_pax_flags(struct task_struct *task)
50260+{
50261+ return;
50262+}
50263+
50264+int
50265+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
50266+ const int unsafe_share)
50267+{
50268+ return 0;
50269+}
50270+
50271+void
50272+gr_handle_delete(const ino_t ino, const dev_t dev)
50273+{
50274+ return;
50275+}
50276+
50277+void
50278+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
50279+{
50280+ return;
50281+}
50282+
50283+void
50284+gr_handle_crash(struct task_struct *task, const int sig)
50285+{
50286+ return;
50287+}
50288+
50289+int
50290+gr_check_crash_exec(const struct file *filp)
50291+{
50292+ return 0;
50293+}
50294+
50295+int
50296+gr_check_crash_uid(const uid_t uid)
50297+{
50298+ return 0;
50299+}
50300+
50301+void
50302+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
50303+ struct dentry *old_dentry,
50304+ struct dentry *new_dentry,
50305+ struct vfsmount *mnt, const __u8 replace)
50306+{
50307+ return;
50308+}
50309+
50310+int
50311+gr_search_socket(const int family, const int type, const int protocol)
50312+{
50313+ return 1;
50314+}
50315+
50316+int
50317+gr_search_connectbind(const int mode, const struct socket *sock,
50318+ const struct sockaddr_in *addr)
50319+{
50320+ return 0;
50321+}
50322+
50323+int
50324+gr_is_capable(const int cap)
50325+{
50326+ return 1;
50327+}
50328+
50329+int
50330+gr_is_capable_nolog(const int cap)
50331+{
50332+ return 1;
50333+}
50334+
50335+void
50336+gr_handle_alertkill(struct task_struct *task)
50337+{
50338+ return;
50339+}
50340+
50341+__u32
50342+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
50343+{
50344+ return 1;
50345+}
50346+
50347+__u32
50348+gr_acl_handle_hidden_file(const struct dentry * dentry,
50349+ const struct vfsmount * mnt)
50350+{
50351+ return 1;
50352+}
50353+
50354+__u32
50355+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
50356+ const int fmode)
50357+{
50358+ return 1;
50359+}
50360+
50361+__u32
50362+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
50363+{
50364+ return 1;
50365+}
50366+
50367+__u32
50368+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
50369+{
50370+ return 1;
50371+}
50372+
50373+int
50374+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
50375+ unsigned int *vm_flags)
50376+{
50377+ return 1;
50378+}
50379+
50380+__u32
50381+gr_acl_handle_truncate(const struct dentry * dentry,
50382+ const struct vfsmount * mnt)
50383+{
50384+ return 1;
50385+}
50386+
50387+__u32
50388+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
50389+{
50390+ return 1;
50391+}
50392+
50393+__u32
50394+gr_acl_handle_access(const struct dentry * dentry,
50395+ const struct vfsmount * mnt, const int fmode)
50396+{
50397+ return 1;
50398+}
50399+
50400+__u32
50401+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
50402+ mode_t mode)
50403+{
50404+ return 1;
50405+}
50406+
50407+__u32
50408+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
50409+ mode_t mode)
50410+{
50411+ return 1;
50412+}
50413+
50414+__u32
50415+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
50416+{
50417+ return 1;
50418+}
50419+
50420+__u32
50421+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
50422+{
50423+ return 1;
50424+}
50425+
50426+void
50427+grsecurity_init(void)
50428+{
50429+ return;
50430+}
50431+
50432+__u32
50433+gr_acl_handle_mknod(const struct dentry * new_dentry,
50434+ const struct dentry * parent_dentry,
50435+ const struct vfsmount * parent_mnt,
50436+ const int mode)
50437+{
50438+ return 1;
50439+}
50440+
50441+__u32
50442+gr_acl_handle_mkdir(const struct dentry * new_dentry,
50443+ const struct dentry * parent_dentry,
50444+ const struct vfsmount * parent_mnt)
50445+{
50446+ return 1;
50447+}
50448+
50449+__u32
50450+gr_acl_handle_symlink(const struct dentry * new_dentry,
50451+ const struct dentry * parent_dentry,
50452+ const struct vfsmount * parent_mnt, const char *from)
50453+{
50454+ return 1;
50455+}
50456+
50457+__u32
50458+gr_acl_handle_link(const struct dentry * new_dentry,
50459+ const struct dentry * parent_dentry,
50460+ const struct vfsmount * parent_mnt,
50461+ const struct dentry * old_dentry,
50462+ const struct vfsmount * old_mnt, const char *to)
50463+{
50464+ return 1;
50465+}
50466+
50467+int
50468+gr_acl_handle_rename(const struct dentry *new_dentry,
50469+ const struct dentry *parent_dentry,
50470+ const struct vfsmount *parent_mnt,
50471+ const struct dentry *old_dentry,
50472+ const struct inode *old_parent_inode,
50473+ const struct vfsmount *old_mnt, const char *newname)
50474+{
50475+ return 0;
50476+}
50477+
50478+int
50479+gr_acl_handle_filldir(const struct file *file, const char *name,
50480+ const int namelen, const ino_t ino)
50481+{
50482+ return 1;
50483+}
50484+
50485+int
50486+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
50487+ const time_t shm_createtime, const uid_t cuid, const int shmid)
50488+{
50489+ return 1;
50490+}
50491+
50492+int
50493+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
50494+{
50495+ return 0;
50496+}
50497+
50498+int
50499+gr_search_accept(const struct socket *sock)
50500+{
50501+ return 0;
50502+}
50503+
50504+int
50505+gr_search_listen(const struct socket *sock)
50506+{
50507+ return 0;
50508+}
50509+
50510+int
50511+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
50512+{
50513+ return 0;
50514+}
50515+
50516+__u32
50517+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
50518+{
50519+ return 1;
50520+}
50521+
50522+__u32
50523+gr_acl_handle_creat(const struct dentry * dentry,
50524+ const struct dentry * p_dentry,
50525+ const struct vfsmount * p_mnt, const int fmode,
50526+ const int imode)
50527+{
50528+ return 1;
50529+}
50530+
50531+void
50532+gr_acl_handle_exit(void)
50533+{
50534+ return;
50535+}
50536+
50537+int
50538+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
50539+{
50540+ return 1;
50541+}
50542+
50543+void
50544+gr_set_role_label(const uid_t uid, const gid_t gid)
50545+{
50546+ return;
50547+}
50548+
50549+int
50550+gr_acl_handle_procpidmem(const struct task_struct *task)
50551+{
50552+ return 0;
50553+}
50554+
50555+int
50556+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
50557+{
50558+ return 0;
50559+}
50560+
50561+int
50562+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
50563+{
50564+ return 0;
50565+}
50566+
50567+void
50568+gr_set_kernel_label(struct task_struct *task)
50569+{
50570+ return;
50571+}
50572+
50573+int
50574+gr_check_user_change(int real, int effective, int fs)
50575+{
50576+ return 0;
50577+}
50578+
50579+int
50580+gr_check_group_change(int real, int effective, int fs)
50581+{
50582+ return 0;
50583+}
50584+
50585+int gr_acl_enable_at_secure(void)
50586+{
50587+ return 0;
50588+}
50589+
50590+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
50591+{
50592+ return dentry->d_inode->i_sb->s_dev;
50593+}
50594+
50595+EXPORT_SYMBOL(gr_is_capable);
50596+EXPORT_SYMBOL(gr_is_capable_nolog);
50597+EXPORT_SYMBOL(gr_learn_resource);
50598+EXPORT_SYMBOL(gr_set_kernel_label);
50599+#ifdef CONFIG_SECURITY
50600+EXPORT_SYMBOL(gr_check_user_change);
50601+EXPORT_SYMBOL(gr_check_group_change);
50602+#endif
50603diff -urNp linux-3.0.4/grsecurity/grsec_exec.c linux-3.0.4/grsecurity/grsec_exec.c
50604--- linux-3.0.4/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
50605+++ linux-3.0.4/grsecurity/grsec_exec.c 2011-09-14 09:20:28.000000000 -0400
50606@@ -0,0 +1,145 @@
50607+#include <linux/kernel.h>
50608+#include <linux/sched.h>
50609+#include <linux/file.h>
50610+#include <linux/binfmts.h>
50611+#include <linux/fs.h>
50612+#include <linux/types.h>
50613+#include <linux/grdefs.h>
50614+#include <linux/grsecurity.h>
50615+#include <linux/grinternal.h>
50616+#include <linux/capability.h>
50617+#include <linux/module.h>
50618+
50619+#include <asm/uaccess.h>
50620+
50621+#ifdef CONFIG_GRKERNSEC_EXECLOG
50622+static char gr_exec_arg_buf[132];
50623+static DEFINE_MUTEX(gr_exec_arg_mutex);
50624+#endif
50625+
50626+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
50627+
50628+void
50629+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
50630+{
50631+#ifdef CONFIG_GRKERNSEC_EXECLOG
50632+ char *grarg = gr_exec_arg_buf;
50633+ unsigned int i, x, execlen = 0;
50634+ char c;
50635+
50636+ if (!((grsec_enable_execlog && grsec_enable_group &&
50637+ in_group_p(grsec_audit_gid))
50638+ || (grsec_enable_execlog && !grsec_enable_group)))
50639+ return;
50640+
50641+ mutex_lock(&gr_exec_arg_mutex);
50642+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
50643+
50644+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
50645+ const char __user *p;
50646+ unsigned int len;
50647+
50648+ p = get_user_arg_ptr(argv, i);
50649+ if (IS_ERR(p))
50650+ goto log;
50651+
50652+ len = strnlen_user(p, 128 - execlen);
50653+ if (len > 128 - execlen)
50654+ len = 128 - execlen;
50655+ else if (len > 0)
50656+ len--;
50657+ if (copy_from_user(grarg + execlen, p, len))
50658+ goto log;
50659+
50660+ /* rewrite unprintable characters */
50661+ for (x = 0; x < len; x++) {
50662+ c = *(grarg + execlen + x);
50663+ if (c < 32 || c > 126)
50664+ *(grarg + execlen + x) = ' ';
50665+ }
50666+
50667+ execlen += len;
50668+ *(grarg + execlen) = ' ';
50669+ *(grarg + execlen + 1) = '\0';
50670+ execlen++;
50671+ }
50672+
50673+ log:
50674+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
50675+ bprm->file->f_path.mnt, grarg);
50676+ mutex_unlock(&gr_exec_arg_mutex);
50677+#endif
50678+ return;
50679+}
50680+
50681+#ifdef CONFIG_GRKERNSEC
50682+extern int gr_acl_is_capable(const int cap);
50683+extern int gr_acl_is_capable_nolog(const int cap);
50684+extern int gr_chroot_is_capable(const int cap);
50685+extern int gr_chroot_is_capable_nolog(const int cap);
50686+#endif
50687+
50688+const char *captab_log[] = {
50689+ "CAP_CHOWN",
50690+ "CAP_DAC_OVERRIDE",
50691+ "CAP_DAC_READ_SEARCH",
50692+ "CAP_FOWNER",
50693+ "CAP_FSETID",
50694+ "CAP_KILL",
50695+ "CAP_SETGID",
50696+ "CAP_SETUID",
50697+ "CAP_SETPCAP",
50698+ "CAP_LINUX_IMMUTABLE",
50699+ "CAP_NET_BIND_SERVICE",
50700+ "CAP_NET_BROADCAST",
50701+ "CAP_NET_ADMIN",
50702+ "CAP_NET_RAW",
50703+ "CAP_IPC_LOCK",
50704+ "CAP_IPC_OWNER",
50705+ "CAP_SYS_MODULE",
50706+ "CAP_SYS_RAWIO",
50707+ "CAP_SYS_CHROOT",
50708+ "CAP_SYS_PTRACE",
50709+ "CAP_SYS_PACCT",
50710+ "CAP_SYS_ADMIN",
50711+ "CAP_SYS_BOOT",
50712+ "CAP_SYS_NICE",
50713+ "CAP_SYS_RESOURCE",
50714+ "CAP_SYS_TIME",
50715+ "CAP_SYS_TTY_CONFIG",
50716+ "CAP_MKNOD",
50717+ "CAP_LEASE",
50718+ "CAP_AUDIT_WRITE",
50719+ "CAP_AUDIT_CONTROL",
50720+ "CAP_SETFCAP",
50721+ "CAP_MAC_OVERRIDE",
50722+ "CAP_MAC_ADMIN",
50723+ "CAP_SYSLOG"
50724+};
50725+
50726+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
50727+
50728+int gr_is_capable(const int cap)
50729+{
50730+#ifdef CONFIG_GRKERNSEC
50731+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
50732+ return 1;
50733+ return 0;
50734+#else
50735+ return 1;
50736+#endif
50737+}
50738+
50739+int gr_is_capable_nolog(const int cap)
50740+{
50741+#ifdef CONFIG_GRKERNSEC
50742+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
50743+ return 1;
50744+ return 0;
50745+#else
50746+ return 1;
50747+#endif
50748+}
50749+
50750+EXPORT_SYMBOL(gr_is_capable);
50751+EXPORT_SYMBOL(gr_is_capable_nolog);
50752diff -urNp linux-3.0.4/grsecurity/grsec_fifo.c linux-3.0.4/grsecurity/grsec_fifo.c
50753--- linux-3.0.4/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
50754+++ linux-3.0.4/grsecurity/grsec_fifo.c 2011-08-23 21:48:14.000000000 -0400
50755@@ -0,0 +1,24 @@
50756+#include <linux/kernel.h>
50757+#include <linux/sched.h>
50758+#include <linux/fs.h>
50759+#include <linux/file.h>
50760+#include <linux/grinternal.h>
50761+
50762+int
50763+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
50764+ const struct dentry *dir, const int flag, const int acc_mode)
50765+{
50766+#ifdef CONFIG_GRKERNSEC_FIFO
50767+ const struct cred *cred = current_cred();
50768+
50769+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
50770+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
50771+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
50772+ (cred->fsuid != dentry->d_inode->i_uid)) {
50773+ if (!inode_permission(dentry->d_inode, acc_mode))
50774+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
50775+ return -EACCES;
50776+ }
50777+#endif
50778+ return 0;
50779+}
50780diff -urNp linux-3.0.4/grsecurity/grsec_fork.c linux-3.0.4/grsecurity/grsec_fork.c
50781--- linux-3.0.4/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
50782+++ linux-3.0.4/grsecurity/grsec_fork.c 2011-08-23 21:48:14.000000000 -0400
50783@@ -0,0 +1,23 @@
50784+#include <linux/kernel.h>
50785+#include <linux/sched.h>
50786+#include <linux/grsecurity.h>
50787+#include <linux/grinternal.h>
50788+#include <linux/errno.h>
50789+
50790+void
50791+gr_log_forkfail(const int retval)
50792+{
50793+#ifdef CONFIG_GRKERNSEC_FORKFAIL
50794+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
50795+ switch (retval) {
50796+ case -EAGAIN:
50797+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
50798+ break;
50799+ case -ENOMEM:
50800+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
50801+ break;
50802+ }
50803+ }
50804+#endif
50805+ return;
50806+}
50807diff -urNp linux-3.0.4/grsecurity/grsec_init.c linux-3.0.4/grsecurity/grsec_init.c
50808--- linux-3.0.4/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
50809+++ linux-3.0.4/grsecurity/grsec_init.c 2011-08-25 17:25:12.000000000 -0400
50810@@ -0,0 +1,269 @@
50811+#include <linux/kernel.h>
50812+#include <linux/sched.h>
50813+#include <linux/mm.h>
50814+#include <linux/gracl.h>
50815+#include <linux/slab.h>
50816+#include <linux/vmalloc.h>
50817+#include <linux/percpu.h>
50818+#include <linux/module.h>
50819+
50820+int grsec_enable_brute;
50821+int grsec_enable_link;
50822+int grsec_enable_dmesg;
50823+int grsec_enable_harden_ptrace;
50824+int grsec_enable_fifo;
50825+int grsec_enable_execlog;
50826+int grsec_enable_signal;
50827+int grsec_enable_forkfail;
50828+int grsec_enable_audit_ptrace;
50829+int grsec_enable_time;
50830+int grsec_enable_audit_textrel;
50831+int grsec_enable_group;
50832+int grsec_audit_gid;
50833+int grsec_enable_chdir;
50834+int grsec_enable_mount;
50835+int grsec_enable_rofs;
50836+int grsec_enable_chroot_findtask;
50837+int grsec_enable_chroot_mount;
50838+int grsec_enable_chroot_shmat;
50839+int grsec_enable_chroot_fchdir;
50840+int grsec_enable_chroot_double;
50841+int grsec_enable_chroot_pivot;
50842+int grsec_enable_chroot_chdir;
50843+int grsec_enable_chroot_chmod;
50844+int grsec_enable_chroot_mknod;
50845+int grsec_enable_chroot_nice;
50846+int grsec_enable_chroot_execlog;
50847+int grsec_enable_chroot_caps;
50848+int grsec_enable_chroot_sysctl;
50849+int grsec_enable_chroot_unix;
50850+int grsec_enable_tpe;
50851+int grsec_tpe_gid;
50852+int grsec_enable_blackhole;
50853+#ifdef CONFIG_IPV6_MODULE
50854+EXPORT_SYMBOL(grsec_enable_blackhole);
50855+#endif
50856+int grsec_lastack_retries;
50857+int grsec_enable_tpe_all;
50858+int grsec_enable_tpe_invert;
50859+int grsec_enable_socket_all;
50860+int grsec_socket_all_gid;
50861+int grsec_enable_socket_client;
50862+int grsec_socket_client_gid;
50863+int grsec_enable_socket_server;
50864+int grsec_socket_server_gid;
50865+int grsec_resource_logging;
50866+int grsec_disable_privio;
50867+int grsec_enable_log_rwxmaps;
50868+int grsec_lock;
50869+
50870+DEFINE_SPINLOCK(grsec_alert_lock);
50871+unsigned long grsec_alert_wtime = 0;
50872+unsigned long grsec_alert_fyet = 0;
50873+
50874+DEFINE_SPINLOCK(grsec_audit_lock);
50875+
50876+DEFINE_RWLOCK(grsec_exec_file_lock);
50877+
50878+char *gr_shared_page[4];
50879+
50880+char *gr_alert_log_fmt;
50881+char *gr_audit_log_fmt;
50882+char *gr_alert_log_buf;
50883+char *gr_audit_log_buf;
50884+
50885+extern struct gr_arg *gr_usermode;
50886+extern unsigned char *gr_system_salt;
50887+extern unsigned char *gr_system_sum;
50888+
50889+void __init
50890+grsecurity_init(void)
50891+{
50892+ int j;
50893+ /* create the per-cpu shared pages */
50894+
50895+#ifdef CONFIG_X86
50896+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
50897+#endif
50898+
50899+ for (j = 0; j < 4; j++) {
50900+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
50901+ if (gr_shared_page[j] == NULL) {
50902+ panic("Unable to allocate grsecurity shared page");
50903+ return;
50904+ }
50905+ }
50906+
50907+ /* allocate log buffers */
50908+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
50909+ if (!gr_alert_log_fmt) {
50910+ panic("Unable to allocate grsecurity alert log format buffer");
50911+ return;
50912+ }
50913+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
50914+ if (!gr_audit_log_fmt) {
50915+ panic("Unable to allocate grsecurity audit log format buffer");
50916+ return;
50917+ }
50918+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
50919+ if (!gr_alert_log_buf) {
50920+ panic("Unable to allocate grsecurity alert log buffer");
50921+ return;
50922+ }
50923+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
50924+ if (!gr_audit_log_buf) {
50925+ panic("Unable to allocate grsecurity audit log buffer");
50926+ return;
50927+ }
50928+
50929+ /* allocate memory for authentication structure */
50930+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
50931+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
50932+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
50933+
50934+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
50935+ panic("Unable to allocate grsecurity authentication structure");
50936+ return;
50937+ }
50938+
50939+
50940+#ifdef CONFIG_GRKERNSEC_IO
50941+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
50942+ grsec_disable_privio = 1;
50943+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
50944+ grsec_disable_privio = 1;
50945+#else
50946+ grsec_disable_privio = 0;
50947+#endif
50948+#endif
50949+
50950+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
50951+ /* for backward compatibility, tpe_invert always defaults to on if
50952+ enabled in the kernel
50953+ */
50954+ grsec_enable_tpe_invert = 1;
50955+#endif
50956+
50957+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
50958+#ifndef CONFIG_GRKERNSEC_SYSCTL
50959+ grsec_lock = 1;
50960+#endif
50961+
50962+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
50963+ grsec_enable_audit_textrel = 1;
50964+#endif
50965+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
50966+ grsec_enable_log_rwxmaps = 1;
50967+#endif
50968+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
50969+ grsec_enable_group = 1;
50970+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
50971+#endif
50972+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
50973+ grsec_enable_chdir = 1;
50974+#endif
50975+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50976+ grsec_enable_harden_ptrace = 1;
50977+#endif
50978+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
50979+ grsec_enable_mount = 1;
50980+#endif
50981+#ifdef CONFIG_GRKERNSEC_LINK
50982+ grsec_enable_link = 1;
50983+#endif
50984+#ifdef CONFIG_GRKERNSEC_BRUTE
50985+ grsec_enable_brute = 1;
50986+#endif
50987+#ifdef CONFIG_GRKERNSEC_DMESG
50988+ grsec_enable_dmesg = 1;
50989+#endif
50990+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
50991+ grsec_enable_blackhole = 1;
50992+ grsec_lastack_retries = 4;
50993+#endif
50994+#ifdef CONFIG_GRKERNSEC_FIFO
50995+ grsec_enable_fifo = 1;
50996+#endif
50997+#ifdef CONFIG_GRKERNSEC_EXECLOG
50998+ grsec_enable_execlog = 1;
50999+#endif
51000+#ifdef CONFIG_GRKERNSEC_SIGNAL
51001+ grsec_enable_signal = 1;
51002+#endif
51003+#ifdef CONFIG_GRKERNSEC_FORKFAIL
51004+ grsec_enable_forkfail = 1;
51005+#endif
51006+#ifdef CONFIG_GRKERNSEC_TIME
51007+ grsec_enable_time = 1;
51008+#endif
51009+#ifdef CONFIG_GRKERNSEC_RESLOG
51010+ grsec_resource_logging = 1;
51011+#endif
51012+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
51013+ grsec_enable_chroot_findtask = 1;
51014+#endif
51015+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
51016+ grsec_enable_chroot_unix = 1;
51017+#endif
51018+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
51019+ grsec_enable_chroot_mount = 1;
51020+#endif
51021+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
51022+ grsec_enable_chroot_fchdir = 1;
51023+#endif
51024+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
51025+ grsec_enable_chroot_shmat = 1;
51026+#endif
51027+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
51028+ grsec_enable_audit_ptrace = 1;
51029+#endif
51030+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
51031+ grsec_enable_chroot_double = 1;
51032+#endif
51033+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
51034+ grsec_enable_chroot_pivot = 1;
51035+#endif
51036+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
51037+ grsec_enable_chroot_chdir = 1;
51038+#endif
51039+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
51040+ grsec_enable_chroot_chmod = 1;
51041+#endif
51042+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
51043+ grsec_enable_chroot_mknod = 1;
51044+#endif
51045+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
51046+ grsec_enable_chroot_nice = 1;
51047+#endif
51048+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
51049+ grsec_enable_chroot_execlog = 1;
51050+#endif
51051+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
51052+ grsec_enable_chroot_caps = 1;
51053+#endif
51054+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
51055+ grsec_enable_chroot_sysctl = 1;
51056+#endif
51057+#ifdef CONFIG_GRKERNSEC_TPE
51058+ grsec_enable_tpe = 1;
51059+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
51060+#ifdef CONFIG_GRKERNSEC_TPE_ALL
51061+ grsec_enable_tpe_all = 1;
51062+#endif
51063+#endif
51064+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
51065+ grsec_enable_socket_all = 1;
51066+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
51067+#endif
51068+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
51069+ grsec_enable_socket_client = 1;
51070+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
51071+#endif
51072+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
51073+ grsec_enable_socket_server = 1;
51074+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
51075+#endif
51076+#endif
51077+
51078+ return;
51079+}
51080diff -urNp linux-3.0.4/grsecurity/grsec_link.c linux-3.0.4/grsecurity/grsec_link.c
51081--- linux-3.0.4/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
51082+++ linux-3.0.4/grsecurity/grsec_link.c 2011-08-23 21:48:14.000000000 -0400
51083@@ -0,0 +1,43 @@
51084+#include <linux/kernel.h>
51085+#include <linux/sched.h>
51086+#include <linux/fs.h>
51087+#include <linux/file.h>
51088+#include <linux/grinternal.h>
51089+
51090+int
51091+gr_handle_follow_link(const struct inode *parent,
51092+ const struct inode *inode,
51093+ const struct dentry *dentry, const struct vfsmount *mnt)
51094+{
51095+#ifdef CONFIG_GRKERNSEC_LINK
51096+ const struct cred *cred = current_cred();
51097+
51098+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
51099+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
51100+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
51101+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
51102+ return -EACCES;
51103+ }
51104+#endif
51105+ return 0;
51106+}
51107+
51108+int
51109+gr_handle_hardlink(const struct dentry *dentry,
51110+ const struct vfsmount *mnt,
51111+ struct inode *inode, const int mode, const char *to)
51112+{
51113+#ifdef CONFIG_GRKERNSEC_LINK
51114+ const struct cred *cred = current_cred();
51115+
51116+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
51117+ (!S_ISREG(mode) || (mode & S_ISUID) ||
51118+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
51119+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
51120+ !capable(CAP_FOWNER) && cred->uid) {
51121+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
51122+ return -EPERM;
51123+ }
51124+#endif
51125+ return 0;
51126+}
51127diff -urNp linux-3.0.4/grsecurity/grsec_log.c linux-3.0.4/grsecurity/grsec_log.c
51128--- linux-3.0.4/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
51129+++ linux-3.0.4/grsecurity/grsec_log.c 2011-09-14 23:17:55.000000000 -0400
51130@@ -0,0 +1,313 @@
51131+#include <linux/kernel.h>
51132+#include <linux/sched.h>
51133+#include <linux/file.h>
51134+#include <linux/tty.h>
51135+#include <linux/fs.h>
51136+#include <linux/grinternal.h>
51137+
51138+#ifdef CONFIG_TREE_PREEMPT_RCU
51139+#define DISABLE_PREEMPT() preempt_disable()
51140+#define ENABLE_PREEMPT() preempt_enable()
51141+#else
51142+#define DISABLE_PREEMPT()
51143+#define ENABLE_PREEMPT()
51144+#endif
51145+
51146+#define BEGIN_LOCKS(x) \
51147+ DISABLE_PREEMPT(); \
51148+ rcu_read_lock(); \
51149+ read_lock(&tasklist_lock); \
51150+ read_lock(&grsec_exec_file_lock); \
51151+ if (x != GR_DO_AUDIT) \
51152+ spin_lock(&grsec_alert_lock); \
51153+ else \
51154+ spin_lock(&grsec_audit_lock)
51155+
51156+#define END_LOCKS(x) \
51157+ if (x != GR_DO_AUDIT) \
51158+ spin_unlock(&grsec_alert_lock); \
51159+ else \
51160+ spin_unlock(&grsec_audit_lock); \
51161+ read_unlock(&grsec_exec_file_lock); \
51162+ read_unlock(&tasklist_lock); \
51163+ rcu_read_unlock(); \
51164+ ENABLE_PREEMPT(); \
51165+ if (x == GR_DONT_AUDIT) \
51166+ gr_handle_alertkill(current)
51167+
51168+enum {
51169+ FLOODING,
51170+ NO_FLOODING
51171+};
51172+
51173+extern char *gr_alert_log_fmt;
51174+extern char *gr_audit_log_fmt;
51175+extern char *gr_alert_log_buf;
51176+extern char *gr_audit_log_buf;
51177+
51178+static int gr_log_start(int audit)
51179+{
51180+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
51181+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
51182+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51183+ unsigned long curr_secs = get_seconds();
51184+
51185+ if (audit == GR_DO_AUDIT)
51186+ goto set_fmt;
51187+
51188+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
51189+ grsec_alert_wtime = curr_secs;
51190+ grsec_alert_fyet = 0;
51191+ } else if (time_before(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
51192+ if (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST) {
51193+ grsec_alert_fyet++;
51194+ } else if (grsec_alert_fyet && grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
51195+ grsec_alert_wtime = curr_secs;
51196+ grsec_alert_fyet++;
51197+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
51198+ return FLOODING;
51199+ }
51200+ } else return FLOODING;
51201+
51202+set_fmt:
51203+ memset(buf, 0, PAGE_SIZE);
51204+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
51205+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
51206+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
51207+ } else if (current->signal->curr_ip) {
51208+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
51209+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
51210+ } else if (gr_acl_is_enabled()) {
51211+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
51212+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
51213+ } else {
51214+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
51215+ strcpy(buf, fmt);
51216+ }
51217+
51218+ return NO_FLOODING;
51219+}
51220+
51221+static void gr_log_middle(int audit, const char *msg, va_list ap)
51222+ __attribute__ ((format (printf, 2, 0)));
51223+
51224+static void gr_log_middle(int audit, const char *msg, va_list ap)
51225+{
51226+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51227+ unsigned int len = strlen(buf);
51228+
51229+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
51230+
51231+ return;
51232+}
51233+
51234+static void gr_log_middle_varargs(int audit, const char *msg, ...)
51235+ __attribute__ ((format (printf, 2, 3)));
51236+
51237+static void gr_log_middle_varargs(int audit, const char *msg, ...)
51238+{
51239+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51240+ unsigned int len = strlen(buf);
51241+ va_list ap;
51242+
51243+ va_start(ap, msg);
51244+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
51245+ va_end(ap);
51246+
51247+ return;
51248+}
51249+
51250+static void gr_log_end(int audit)
51251+{
51252+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51253+ unsigned int len = strlen(buf);
51254+
51255+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
51256+ printk("%s\n", buf);
51257+
51258+ return;
51259+}
51260+
51261+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
51262+{
51263+ int logtype;
51264+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
51265+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
51266+ void *voidptr = NULL;
51267+ int num1 = 0, num2 = 0;
51268+ unsigned long ulong1 = 0, ulong2 = 0;
51269+ struct dentry *dentry = NULL;
51270+ struct vfsmount *mnt = NULL;
51271+ struct file *file = NULL;
51272+ struct task_struct *task = NULL;
51273+ const struct cred *cred, *pcred;
51274+ va_list ap;
51275+
51276+ BEGIN_LOCKS(audit);
51277+ logtype = gr_log_start(audit);
51278+ if (logtype == FLOODING) {
51279+ END_LOCKS(audit);
51280+ return;
51281+ }
51282+ va_start(ap, argtypes);
51283+ switch (argtypes) {
51284+ case GR_TTYSNIFF:
51285+ task = va_arg(ap, struct task_struct *);
51286+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
51287+ break;
51288+ case GR_SYSCTL_HIDDEN:
51289+ str1 = va_arg(ap, char *);
51290+ gr_log_middle_varargs(audit, msg, result, str1);
51291+ break;
51292+ case GR_RBAC:
51293+ dentry = va_arg(ap, struct dentry *);
51294+ mnt = va_arg(ap, struct vfsmount *);
51295+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
51296+ break;
51297+ case GR_RBAC_STR:
51298+ dentry = va_arg(ap, struct dentry *);
51299+ mnt = va_arg(ap, struct vfsmount *);
51300+ str1 = va_arg(ap, char *);
51301+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
51302+ break;
51303+ case GR_STR_RBAC:
51304+ str1 = va_arg(ap, char *);
51305+ dentry = va_arg(ap, struct dentry *);
51306+ mnt = va_arg(ap, struct vfsmount *);
51307+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
51308+ break;
51309+ case GR_RBAC_MODE2:
51310+ dentry = va_arg(ap, struct dentry *);
51311+ mnt = va_arg(ap, struct vfsmount *);
51312+ str1 = va_arg(ap, char *);
51313+ str2 = va_arg(ap, char *);
51314+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
51315+ break;
51316+ case GR_RBAC_MODE3:
51317+ dentry = va_arg(ap, struct dentry *);
51318+ mnt = va_arg(ap, struct vfsmount *);
51319+ str1 = va_arg(ap, char *);
51320+ str2 = va_arg(ap, char *);
51321+ str3 = va_arg(ap, char *);
51322+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
51323+ break;
51324+ case GR_FILENAME:
51325+ dentry = va_arg(ap, struct dentry *);
51326+ mnt = va_arg(ap, struct vfsmount *);
51327+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
51328+ break;
51329+ case GR_STR_FILENAME:
51330+ str1 = va_arg(ap, char *);
51331+ dentry = va_arg(ap, struct dentry *);
51332+ mnt = va_arg(ap, struct vfsmount *);
51333+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
51334+ break;
51335+ case GR_FILENAME_STR:
51336+ dentry = va_arg(ap, struct dentry *);
51337+ mnt = va_arg(ap, struct vfsmount *);
51338+ str1 = va_arg(ap, char *);
51339+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
51340+ break;
51341+ case GR_FILENAME_TWO_INT:
51342+ dentry = va_arg(ap, struct dentry *);
51343+ mnt = va_arg(ap, struct vfsmount *);
51344+ num1 = va_arg(ap, int);
51345+ num2 = va_arg(ap, int);
51346+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
51347+ break;
51348+ case GR_FILENAME_TWO_INT_STR:
51349+ dentry = va_arg(ap, struct dentry *);
51350+ mnt = va_arg(ap, struct vfsmount *);
51351+ num1 = va_arg(ap, int);
51352+ num2 = va_arg(ap, int);
51353+ str1 = va_arg(ap, char *);
51354+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
51355+ break;
51356+ case GR_TEXTREL:
51357+ file = va_arg(ap, struct file *);
51358+ ulong1 = va_arg(ap, unsigned long);
51359+ ulong2 = va_arg(ap, unsigned long);
51360+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
51361+ break;
51362+ case GR_PTRACE:
51363+ task = va_arg(ap, struct task_struct *);
51364+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
51365+ break;
51366+ case GR_RESOURCE:
51367+ task = va_arg(ap, struct task_struct *);
51368+ cred = __task_cred(task);
51369+ pcred = __task_cred(task->real_parent);
51370+ ulong1 = va_arg(ap, unsigned long);
51371+ str1 = va_arg(ap, char *);
51372+ ulong2 = va_arg(ap, unsigned long);
51373+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51374+ break;
51375+ case GR_CAP:
51376+ task = va_arg(ap, struct task_struct *);
51377+ cred = __task_cred(task);
51378+ pcred = __task_cred(task->real_parent);
51379+ str1 = va_arg(ap, char *);
51380+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51381+ break;
51382+ case GR_SIG:
51383+ str1 = va_arg(ap, char *);
51384+ voidptr = va_arg(ap, void *);
51385+ gr_log_middle_varargs(audit, msg, str1, voidptr);
51386+ break;
51387+ case GR_SIG2:
51388+ task = va_arg(ap, struct task_struct *);
51389+ cred = __task_cred(task);
51390+ pcred = __task_cred(task->real_parent);
51391+ num1 = va_arg(ap, int);
51392+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51393+ break;
51394+ case GR_CRASH1:
51395+ task = va_arg(ap, struct task_struct *);
51396+ cred = __task_cred(task);
51397+ pcred = __task_cred(task->real_parent);
51398+ ulong1 = va_arg(ap, unsigned long);
51399+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
51400+ break;
51401+ case GR_CRASH2:
51402+ task = va_arg(ap, struct task_struct *);
51403+ cred = __task_cred(task);
51404+ pcred = __task_cred(task->real_parent);
51405+ ulong1 = va_arg(ap, unsigned long);
51406+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
51407+ break;
51408+ case GR_RWXMAP:
51409+ file = va_arg(ap, struct file *);
51410+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
51411+ break;
51412+ case GR_PSACCT:
51413+ {
51414+ unsigned int wday, cday;
51415+ __u8 whr, chr;
51416+ __u8 wmin, cmin;
51417+ __u8 wsec, csec;
51418+ char cur_tty[64] = { 0 };
51419+ char parent_tty[64] = { 0 };
51420+
51421+ task = va_arg(ap, struct task_struct *);
51422+ wday = va_arg(ap, unsigned int);
51423+ cday = va_arg(ap, unsigned int);
51424+ whr = va_arg(ap, int);
51425+ chr = va_arg(ap, int);
51426+ wmin = va_arg(ap, int);
51427+ cmin = va_arg(ap, int);
51428+ wsec = va_arg(ap, int);
51429+ csec = va_arg(ap, int);
51430+ ulong1 = va_arg(ap, unsigned long);
51431+ cred = __task_cred(task);
51432+ pcred = __task_cred(task->real_parent);
51433+
51434+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51435+ }
51436+ break;
51437+ default:
51438+ gr_log_middle(audit, msg, ap);
51439+ }
51440+ va_end(ap);
51441+ gr_log_end(audit);
51442+ END_LOCKS(audit);
51443+}
51444diff -urNp linux-3.0.4/grsecurity/grsec_mem.c linux-3.0.4/grsecurity/grsec_mem.c
51445--- linux-3.0.4/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
51446+++ linux-3.0.4/grsecurity/grsec_mem.c 2011-08-23 21:48:14.000000000 -0400
51447@@ -0,0 +1,33 @@
51448+#include <linux/kernel.h>
51449+#include <linux/sched.h>
51450+#include <linux/mm.h>
51451+#include <linux/mman.h>
51452+#include <linux/grinternal.h>
51453+
51454+void
51455+gr_handle_ioperm(void)
51456+{
51457+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
51458+ return;
51459+}
51460+
51461+void
51462+gr_handle_iopl(void)
51463+{
51464+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
51465+ return;
51466+}
51467+
51468+void
51469+gr_handle_mem_readwrite(u64 from, u64 to)
51470+{
51471+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
51472+ return;
51473+}
51474+
51475+void
51476+gr_handle_vm86(void)
51477+{
51478+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
51479+ return;
51480+}
51481diff -urNp linux-3.0.4/grsecurity/grsec_mount.c linux-3.0.4/grsecurity/grsec_mount.c
51482--- linux-3.0.4/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
51483+++ linux-3.0.4/grsecurity/grsec_mount.c 2011-08-23 21:48:14.000000000 -0400
51484@@ -0,0 +1,62 @@
51485+#include <linux/kernel.h>
51486+#include <linux/sched.h>
51487+#include <linux/mount.h>
51488+#include <linux/grsecurity.h>
51489+#include <linux/grinternal.h>
51490+
51491+void
51492+gr_log_remount(const char *devname, const int retval)
51493+{
51494+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51495+ if (grsec_enable_mount && (retval >= 0))
51496+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
51497+#endif
51498+ return;
51499+}
51500+
51501+void
51502+gr_log_unmount(const char *devname, const int retval)
51503+{
51504+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51505+ if (grsec_enable_mount && (retval >= 0))
51506+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
51507+#endif
51508+ return;
51509+}
51510+
51511+void
51512+gr_log_mount(const char *from, const char *to, const int retval)
51513+{
51514+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51515+ if (grsec_enable_mount && (retval >= 0))
51516+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
51517+#endif
51518+ return;
51519+}
51520+
51521+int
51522+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
51523+{
51524+#ifdef CONFIG_GRKERNSEC_ROFS
51525+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
51526+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
51527+ return -EPERM;
51528+ } else
51529+ return 0;
51530+#endif
51531+ return 0;
51532+}
51533+
51534+int
51535+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
51536+{
51537+#ifdef CONFIG_GRKERNSEC_ROFS
51538+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
51539+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
51540+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
51541+ return -EPERM;
51542+ } else
51543+ return 0;
51544+#endif
51545+ return 0;
51546+}
51547diff -urNp linux-3.0.4/grsecurity/grsec_pax.c linux-3.0.4/grsecurity/grsec_pax.c
51548--- linux-3.0.4/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
51549+++ linux-3.0.4/grsecurity/grsec_pax.c 2011-08-23 21:48:14.000000000 -0400
51550@@ -0,0 +1,36 @@
51551+#include <linux/kernel.h>
51552+#include <linux/sched.h>
51553+#include <linux/mm.h>
51554+#include <linux/file.h>
51555+#include <linux/grinternal.h>
51556+#include <linux/grsecurity.h>
51557+
51558+void
51559+gr_log_textrel(struct vm_area_struct * vma)
51560+{
51561+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
51562+ if (grsec_enable_audit_textrel)
51563+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
51564+#endif
51565+ return;
51566+}
51567+
51568+void
51569+gr_log_rwxmmap(struct file *file)
51570+{
51571+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51572+ if (grsec_enable_log_rwxmaps)
51573+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
51574+#endif
51575+ return;
51576+}
51577+
51578+void
51579+gr_log_rwxmprotect(struct file *file)
51580+{
51581+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51582+ if (grsec_enable_log_rwxmaps)
51583+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
51584+#endif
51585+ return;
51586+}
51587diff -urNp linux-3.0.4/grsecurity/grsec_ptrace.c linux-3.0.4/grsecurity/grsec_ptrace.c
51588--- linux-3.0.4/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
51589+++ linux-3.0.4/grsecurity/grsec_ptrace.c 2011-08-23 21:48:14.000000000 -0400
51590@@ -0,0 +1,14 @@
51591+#include <linux/kernel.h>
51592+#include <linux/sched.h>
51593+#include <linux/grinternal.h>
51594+#include <linux/grsecurity.h>
51595+
51596+void
51597+gr_audit_ptrace(struct task_struct *task)
51598+{
51599+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
51600+ if (grsec_enable_audit_ptrace)
51601+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
51602+#endif
51603+ return;
51604+}
51605diff -urNp linux-3.0.4/grsecurity/grsec_sig.c linux-3.0.4/grsecurity/grsec_sig.c
51606--- linux-3.0.4/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
51607+++ linux-3.0.4/grsecurity/grsec_sig.c 2011-08-23 21:48:14.000000000 -0400
51608@@ -0,0 +1,206 @@
51609+#include <linux/kernel.h>
51610+#include <linux/sched.h>
51611+#include <linux/delay.h>
51612+#include <linux/grsecurity.h>
51613+#include <linux/grinternal.h>
51614+#include <linux/hardirq.h>
51615+
51616+char *signames[] = {
51617+ [SIGSEGV] = "Segmentation fault",
51618+ [SIGILL] = "Illegal instruction",
51619+ [SIGABRT] = "Abort",
51620+ [SIGBUS] = "Invalid alignment/Bus error"
51621+};
51622+
51623+void
51624+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
51625+{
51626+#ifdef CONFIG_GRKERNSEC_SIGNAL
51627+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
51628+ (sig == SIGABRT) || (sig == SIGBUS))) {
51629+ if (t->pid == current->pid) {
51630+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
51631+ } else {
51632+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
51633+ }
51634+ }
51635+#endif
51636+ return;
51637+}
51638+
51639+int
51640+gr_handle_signal(const struct task_struct *p, const int sig)
51641+{
51642+#ifdef CONFIG_GRKERNSEC
51643+ if (current->pid > 1 && gr_check_protected_task(p)) {
51644+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
51645+ return -EPERM;
51646+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
51647+ return -EPERM;
51648+ }
51649+#endif
51650+ return 0;
51651+}
51652+
51653+#ifdef CONFIG_GRKERNSEC
51654+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
51655+
51656+int gr_fake_force_sig(int sig, struct task_struct *t)
51657+{
51658+ unsigned long int flags;
51659+ int ret, blocked, ignored;
51660+ struct k_sigaction *action;
51661+
51662+ spin_lock_irqsave(&t->sighand->siglock, flags);
51663+ action = &t->sighand->action[sig-1];
51664+ ignored = action->sa.sa_handler == SIG_IGN;
51665+ blocked = sigismember(&t->blocked, sig);
51666+ if (blocked || ignored) {
51667+ action->sa.sa_handler = SIG_DFL;
51668+ if (blocked) {
51669+ sigdelset(&t->blocked, sig);
51670+ recalc_sigpending_and_wake(t);
51671+ }
51672+ }
51673+ if (action->sa.sa_handler == SIG_DFL)
51674+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
51675+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
51676+
51677+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
51678+
51679+ return ret;
51680+}
51681+#endif
51682+
51683+#ifdef CONFIG_GRKERNSEC_BRUTE
51684+#define GR_USER_BAN_TIME (15 * 60)
51685+
51686+static int __get_dumpable(unsigned long mm_flags)
51687+{
51688+ int ret;
51689+
51690+ ret = mm_flags & MMF_DUMPABLE_MASK;
51691+ return (ret >= 2) ? 2 : ret;
51692+}
51693+#endif
51694+
51695+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
51696+{
51697+#ifdef CONFIG_GRKERNSEC_BRUTE
51698+ uid_t uid = 0;
51699+
51700+ if (!grsec_enable_brute)
51701+ return;
51702+
51703+ rcu_read_lock();
51704+ read_lock(&tasklist_lock);
51705+ read_lock(&grsec_exec_file_lock);
51706+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
51707+ p->real_parent->brute = 1;
51708+ else {
51709+ const struct cred *cred = __task_cred(p), *cred2;
51710+ struct task_struct *tsk, *tsk2;
51711+
51712+ if (!__get_dumpable(mm_flags) && cred->uid) {
51713+ struct user_struct *user;
51714+
51715+ uid = cred->uid;
51716+
51717+ /* this is put upon execution past expiration */
51718+ user = find_user(uid);
51719+ if (user == NULL)
51720+ goto unlock;
51721+ user->banned = 1;
51722+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
51723+ if (user->ban_expires == ~0UL)
51724+ user->ban_expires--;
51725+
51726+ do_each_thread(tsk2, tsk) {
51727+ cred2 = __task_cred(tsk);
51728+ if (tsk != p && cred2->uid == uid)
51729+ gr_fake_force_sig(SIGKILL, tsk);
51730+ } while_each_thread(tsk2, tsk);
51731+ }
51732+ }
51733+unlock:
51734+ read_unlock(&grsec_exec_file_lock);
51735+ read_unlock(&tasklist_lock);
51736+ rcu_read_unlock();
51737+
51738+ if (uid)
51739+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
51740+
51741+#endif
51742+ return;
51743+}
51744+
51745+void gr_handle_brute_check(void)
51746+{
51747+#ifdef CONFIG_GRKERNSEC_BRUTE
51748+ if (current->brute)
51749+ msleep(30 * 1000);
51750+#endif
51751+ return;
51752+}
51753+
51754+void gr_handle_kernel_exploit(void)
51755+{
51756+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
51757+ const struct cred *cred;
51758+ struct task_struct *tsk, *tsk2;
51759+ struct user_struct *user;
51760+ uid_t uid;
51761+
51762+ if (in_irq() || in_serving_softirq() || in_nmi())
51763+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
51764+
51765+ uid = current_uid();
51766+
51767+ if (uid == 0)
51768+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
51769+ else {
51770+ /* kill all the processes of this user, hold a reference
51771+ to their creds struct, and prevent them from creating
51772+ another process until system reset
51773+ */
51774+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
51775+ /* we intentionally leak this ref */
51776+ user = get_uid(current->cred->user);
51777+ if (user) {
51778+ user->banned = 1;
51779+ user->ban_expires = ~0UL;
51780+ }
51781+
51782+ read_lock(&tasklist_lock);
51783+ do_each_thread(tsk2, tsk) {
51784+ cred = __task_cred(tsk);
51785+ if (cred->uid == uid)
51786+ gr_fake_force_sig(SIGKILL, tsk);
51787+ } while_each_thread(tsk2, tsk);
51788+ read_unlock(&tasklist_lock);
51789+ }
51790+#endif
51791+}
51792+
51793+int __gr_process_user_ban(struct user_struct *user)
51794+{
51795+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
51796+ if (unlikely(user->banned)) {
51797+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
51798+ user->banned = 0;
51799+ user->ban_expires = 0;
51800+ free_uid(user);
51801+ } else
51802+ return -EPERM;
51803+ }
51804+#endif
51805+ return 0;
51806+}
51807+
51808+int gr_process_user_ban(void)
51809+{
51810+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
51811+ return __gr_process_user_ban(current->cred->user);
51812+#endif
51813+ return 0;
51814+}
51815diff -urNp linux-3.0.4/grsecurity/grsec_sock.c linux-3.0.4/grsecurity/grsec_sock.c
51816--- linux-3.0.4/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
51817+++ linux-3.0.4/grsecurity/grsec_sock.c 2011-08-23 21:48:14.000000000 -0400
51818@@ -0,0 +1,244 @@
51819+#include <linux/kernel.h>
51820+#include <linux/module.h>
51821+#include <linux/sched.h>
51822+#include <linux/file.h>
51823+#include <linux/net.h>
51824+#include <linux/in.h>
51825+#include <linux/ip.h>
51826+#include <net/sock.h>
51827+#include <net/inet_sock.h>
51828+#include <linux/grsecurity.h>
51829+#include <linux/grinternal.h>
51830+#include <linux/gracl.h>
51831+
51832+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
51833+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
51834+
51835+EXPORT_SYMBOL(gr_search_udp_recvmsg);
51836+EXPORT_SYMBOL(gr_search_udp_sendmsg);
51837+
51838+#ifdef CONFIG_UNIX_MODULE
51839+EXPORT_SYMBOL(gr_acl_handle_unix);
51840+EXPORT_SYMBOL(gr_acl_handle_mknod);
51841+EXPORT_SYMBOL(gr_handle_chroot_unix);
51842+EXPORT_SYMBOL(gr_handle_create);
51843+#endif
51844+
51845+#ifdef CONFIG_GRKERNSEC
51846+#define gr_conn_table_size 32749
51847+struct conn_table_entry {
51848+ struct conn_table_entry *next;
51849+ struct signal_struct *sig;
51850+};
51851+
51852+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
51853+DEFINE_SPINLOCK(gr_conn_table_lock);
51854+
51855+extern const char * gr_socktype_to_name(unsigned char type);
51856+extern const char * gr_proto_to_name(unsigned char proto);
51857+extern const char * gr_sockfamily_to_name(unsigned char family);
51858+
51859+static __inline__ int
51860+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
51861+{
51862+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
51863+}
51864+
51865+static __inline__ int
51866+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
51867+ __u16 sport, __u16 dport)
51868+{
51869+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
51870+ sig->gr_sport == sport && sig->gr_dport == dport))
51871+ return 1;
51872+ else
51873+ return 0;
51874+}
51875+
51876+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
51877+{
51878+ struct conn_table_entry **match;
51879+ unsigned int index;
51880+
51881+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
51882+ sig->gr_sport, sig->gr_dport,
51883+ gr_conn_table_size);
51884+
51885+ newent->sig = sig;
51886+
51887+ match = &gr_conn_table[index];
51888+ newent->next = *match;
51889+ *match = newent;
51890+
51891+ return;
51892+}
51893+
51894+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
51895+{
51896+ struct conn_table_entry *match, *last = NULL;
51897+ unsigned int index;
51898+
51899+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
51900+ sig->gr_sport, sig->gr_dport,
51901+ gr_conn_table_size);
51902+
51903+ match = gr_conn_table[index];
51904+ while (match && !conn_match(match->sig,
51905+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
51906+ sig->gr_dport)) {
51907+ last = match;
51908+ match = match->next;
51909+ }
51910+
51911+ if (match) {
51912+ if (last)
51913+ last->next = match->next;
51914+ else
51915+ gr_conn_table[index] = NULL;
51916+ kfree(match);
51917+ }
51918+
51919+ return;
51920+}
51921+
51922+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
51923+ __u16 sport, __u16 dport)
51924+{
51925+ struct conn_table_entry *match;
51926+ unsigned int index;
51927+
51928+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
51929+
51930+ match = gr_conn_table[index];
51931+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
51932+ match = match->next;
51933+
51934+ if (match)
51935+ return match->sig;
51936+ else
51937+ return NULL;
51938+}
51939+
51940+#endif
51941+
51942+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
51943+{
51944+#ifdef CONFIG_GRKERNSEC
51945+ struct signal_struct *sig = task->signal;
51946+ struct conn_table_entry *newent;
51947+
51948+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
51949+ if (newent == NULL)
51950+ return;
51951+ /* no bh lock needed since we are called with bh disabled */
51952+ spin_lock(&gr_conn_table_lock);
51953+ gr_del_task_from_ip_table_nolock(sig);
51954+ sig->gr_saddr = inet->inet_rcv_saddr;
51955+ sig->gr_daddr = inet->inet_daddr;
51956+ sig->gr_sport = inet->inet_sport;
51957+ sig->gr_dport = inet->inet_dport;
51958+ gr_add_to_task_ip_table_nolock(sig, newent);
51959+ spin_unlock(&gr_conn_table_lock);
51960+#endif
51961+ return;
51962+}
51963+
51964+void gr_del_task_from_ip_table(struct task_struct *task)
51965+{
51966+#ifdef CONFIG_GRKERNSEC
51967+ spin_lock_bh(&gr_conn_table_lock);
51968+ gr_del_task_from_ip_table_nolock(task->signal);
51969+ spin_unlock_bh(&gr_conn_table_lock);
51970+#endif
51971+ return;
51972+}
51973+
51974+void
51975+gr_attach_curr_ip(const struct sock *sk)
51976+{
51977+#ifdef CONFIG_GRKERNSEC
51978+ struct signal_struct *p, *set;
51979+ const struct inet_sock *inet = inet_sk(sk);
51980+
51981+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
51982+ return;
51983+
51984+ set = current->signal;
51985+
51986+ spin_lock_bh(&gr_conn_table_lock);
51987+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
51988+ inet->inet_dport, inet->inet_sport);
51989+ if (unlikely(p != NULL)) {
51990+ set->curr_ip = p->curr_ip;
51991+ set->used_accept = 1;
51992+ gr_del_task_from_ip_table_nolock(p);
51993+ spin_unlock_bh(&gr_conn_table_lock);
51994+ return;
51995+ }
51996+ spin_unlock_bh(&gr_conn_table_lock);
51997+
51998+ set->curr_ip = inet->inet_daddr;
51999+ set->used_accept = 1;
52000+#endif
52001+ return;
52002+}
52003+
52004+int
52005+gr_handle_sock_all(const int family, const int type, const int protocol)
52006+{
52007+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
52008+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
52009+ (family != AF_UNIX)) {
52010+ if (family == AF_INET)
52011+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
52012+ else
52013+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
52014+ return -EACCES;
52015+ }
52016+#endif
52017+ return 0;
52018+}
52019+
52020+int
52021+gr_handle_sock_server(const struct sockaddr *sck)
52022+{
52023+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
52024+ if (grsec_enable_socket_server &&
52025+ in_group_p(grsec_socket_server_gid) &&
52026+ sck && (sck->sa_family != AF_UNIX) &&
52027+ (sck->sa_family != AF_LOCAL)) {
52028+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
52029+ return -EACCES;
52030+ }
52031+#endif
52032+ return 0;
52033+}
52034+
52035+int
52036+gr_handle_sock_server_other(const struct sock *sck)
52037+{
52038+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
52039+ if (grsec_enable_socket_server &&
52040+ in_group_p(grsec_socket_server_gid) &&
52041+ sck && (sck->sk_family != AF_UNIX) &&
52042+ (sck->sk_family != AF_LOCAL)) {
52043+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
52044+ return -EACCES;
52045+ }
52046+#endif
52047+ return 0;
52048+}
52049+
52050+int
52051+gr_handle_sock_client(const struct sockaddr *sck)
52052+{
52053+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
52054+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
52055+ sck && (sck->sa_family != AF_UNIX) &&
52056+ (sck->sa_family != AF_LOCAL)) {
52057+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
52058+ return -EACCES;
52059+ }
52060+#endif
52061+ return 0;
52062+}
52063diff -urNp linux-3.0.4/grsecurity/grsec_sysctl.c linux-3.0.4/grsecurity/grsec_sysctl.c
52064--- linux-3.0.4/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
52065+++ linux-3.0.4/grsecurity/grsec_sysctl.c 2011-08-25 17:26:15.000000000 -0400
52066@@ -0,0 +1,433 @@
52067+#include <linux/kernel.h>
52068+#include <linux/sched.h>
52069+#include <linux/sysctl.h>
52070+#include <linux/grsecurity.h>
52071+#include <linux/grinternal.h>
52072+
52073+int
52074+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
52075+{
52076+#ifdef CONFIG_GRKERNSEC_SYSCTL
52077+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
52078+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
52079+ return -EACCES;
52080+ }
52081+#endif
52082+ return 0;
52083+}
52084+
52085+#ifdef CONFIG_GRKERNSEC_ROFS
52086+static int __maybe_unused one = 1;
52087+#endif
52088+
52089+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
52090+struct ctl_table grsecurity_table[] = {
52091+#ifdef CONFIG_GRKERNSEC_SYSCTL
52092+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
52093+#ifdef CONFIG_GRKERNSEC_IO
52094+ {
52095+ .procname = "disable_priv_io",
52096+ .data = &grsec_disable_privio,
52097+ .maxlen = sizeof(int),
52098+ .mode = 0600,
52099+ .proc_handler = &proc_dointvec,
52100+ },
52101+#endif
52102+#endif
52103+#ifdef CONFIG_GRKERNSEC_LINK
52104+ {
52105+ .procname = "linking_restrictions",
52106+ .data = &grsec_enable_link,
52107+ .maxlen = sizeof(int),
52108+ .mode = 0600,
52109+ .proc_handler = &proc_dointvec,
52110+ },
52111+#endif
52112+#ifdef CONFIG_GRKERNSEC_BRUTE
52113+ {
52114+ .procname = "deter_bruteforce",
52115+ .data = &grsec_enable_brute,
52116+ .maxlen = sizeof(int),
52117+ .mode = 0600,
52118+ .proc_handler = &proc_dointvec,
52119+ },
52120+#endif
52121+#ifdef CONFIG_GRKERNSEC_FIFO
52122+ {
52123+ .procname = "fifo_restrictions",
52124+ .data = &grsec_enable_fifo,
52125+ .maxlen = sizeof(int),
52126+ .mode = 0600,
52127+ .proc_handler = &proc_dointvec,
52128+ },
52129+#endif
52130+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
52131+ {
52132+ .procname = "ip_blackhole",
52133+ .data = &grsec_enable_blackhole,
52134+ .maxlen = sizeof(int),
52135+ .mode = 0600,
52136+ .proc_handler = &proc_dointvec,
52137+ },
52138+ {
52139+ .procname = "lastack_retries",
52140+ .data = &grsec_lastack_retries,
52141+ .maxlen = sizeof(int),
52142+ .mode = 0600,
52143+ .proc_handler = &proc_dointvec,
52144+ },
52145+#endif
52146+#ifdef CONFIG_GRKERNSEC_EXECLOG
52147+ {
52148+ .procname = "exec_logging",
52149+ .data = &grsec_enable_execlog,
52150+ .maxlen = sizeof(int),
52151+ .mode = 0600,
52152+ .proc_handler = &proc_dointvec,
52153+ },
52154+#endif
52155+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
52156+ {
52157+ .procname = "rwxmap_logging",
52158+ .data = &grsec_enable_log_rwxmaps,
52159+ .maxlen = sizeof(int),
52160+ .mode = 0600,
52161+ .proc_handler = &proc_dointvec,
52162+ },
52163+#endif
52164+#ifdef CONFIG_GRKERNSEC_SIGNAL
52165+ {
52166+ .procname = "signal_logging",
52167+ .data = &grsec_enable_signal,
52168+ .maxlen = sizeof(int),
52169+ .mode = 0600,
52170+ .proc_handler = &proc_dointvec,
52171+ },
52172+#endif
52173+#ifdef CONFIG_GRKERNSEC_FORKFAIL
52174+ {
52175+ .procname = "forkfail_logging",
52176+ .data = &grsec_enable_forkfail,
52177+ .maxlen = sizeof(int),
52178+ .mode = 0600,
52179+ .proc_handler = &proc_dointvec,
52180+ },
52181+#endif
52182+#ifdef CONFIG_GRKERNSEC_TIME
52183+ {
52184+ .procname = "timechange_logging",
52185+ .data = &grsec_enable_time,
52186+ .maxlen = sizeof(int),
52187+ .mode = 0600,
52188+ .proc_handler = &proc_dointvec,
52189+ },
52190+#endif
52191+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
52192+ {
52193+ .procname = "chroot_deny_shmat",
52194+ .data = &grsec_enable_chroot_shmat,
52195+ .maxlen = sizeof(int),
52196+ .mode = 0600,
52197+ .proc_handler = &proc_dointvec,
52198+ },
52199+#endif
52200+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
52201+ {
52202+ .procname = "chroot_deny_unix",
52203+ .data = &grsec_enable_chroot_unix,
52204+ .maxlen = sizeof(int),
52205+ .mode = 0600,
52206+ .proc_handler = &proc_dointvec,
52207+ },
52208+#endif
52209+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
52210+ {
52211+ .procname = "chroot_deny_mount",
52212+ .data = &grsec_enable_chroot_mount,
52213+ .maxlen = sizeof(int),
52214+ .mode = 0600,
52215+ .proc_handler = &proc_dointvec,
52216+ },
52217+#endif
52218+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
52219+ {
52220+ .procname = "chroot_deny_fchdir",
52221+ .data = &grsec_enable_chroot_fchdir,
52222+ .maxlen = sizeof(int),
52223+ .mode = 0600,
52224+ .proc_handler = &proc_dointvec,
52225+ },
52226+#endif
52227+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
52228+ {
52229+ .procname = "chroot_deny_chroot",
52230+ .data = &grsec_enable_chroot_double,
52231+ .maxlen = sizeof(int),
52232+ .mode = 0600,
52233+ .proc_handler = &proc_dointvec,
52234+ },
52235+#endif
52236+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
52237+ {
52238+ .procname = "chroot_deny_pivot",
52239+ .data = &grsec_enable_chroot_pivot,
52240+ .maxlen = sizeof(int),
52241+ .mode = 0600,
52242+ .proc_handler = &proc_dointvec,
52243+ },
52244+#endif
52245+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
52246+ {
52247+ .procname = "chroot_enforce_chdir",
52248+ .data = &grsec_enable_chroot_chdir,
52249+ .maxlen = sizeof(int),
52250+ .mode = 0600,
52251+ .proc_handler = &proc_dointvec,
52252+ },
52253+#endif
52254+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
52255+ {
52256+ .procname = "chroot_deny_chmod",
52257+ .data = &grsec_enable_chroot_chmod,
52258+ .maxlen = sizeof(int),
52259+ .mode = 0600,
52260+ .proc_handler = &proc_dointvec,
52261+ },
52262+#endif
52263+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
52264+ {
52265+ .procname = "chroot_deny_mknod",
52266+ .data = &grsec_enable_chroot_mknod,
52267+ .maxlen = sizeof(int),
52268+ .mode = 0600,
52269+ .proc_handler = &proc_dointvec,
52270+ },
52271+#endif
52272+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52273+ {
52274+ .procname = "chroot_restrict_nice",
52275+ .data = &grsec_enable_chroot_nice,
52276+ .maxlen = sizeof(int),
52277+ .mode = 0600,
52278+ .proc_handler = &proc_dointvec,
52279+ },
52280+#endif
52281+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
52282+ {
52283+ .procname = "chroot_execlog",
52284+ .data = &grsec_enable_chroot_execlog,
52285+ .maxlen = sizeof(int),
52286+ .mode = 0600,
52287+ .proc_handler = &proc_dointvec,
52288+ },
52289+#endif
52290+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52291+ {
52292+ .procname = "chroot_caps",
52293+ .data = &grsec_enable_chroot_caps,
52294+ .maxlen = sizeof(int),
52295+ .mode = 0600,
52296+ .proc_handler = &proc_dointvec,
52297+ },
52298+#endif
52299+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
52300+ {
52301+ .procname = "chroot_deny_sysctl",
52302+ .data = &grsec_enable_chroot_sysctl,
52303+ .maxlen = sizeof(int),
52304+ .mode = 0600,
52305+ .proc_handler = &proc_dointvec,
52306+ },
52307+#endif
52308+#ifdef CONFIG_GRKERNSEC_TPE
52309+ {
52310+ .procname = "tpe",
52311+ .data = &grsec_enable_tpe,
52312+ .maxlen = sizeof(int),
52313+ .mode = 0600,
52314+ .proc_handler = &proc_dointvec,
52315+ },
52316+ {
52317+ .procname = "tpe_gid",
52318+ .data = &grsec_tpe_gid,
52319+ .maxlen = sizeof(int),
52320+ .mode = 0600,
52321+ .proc_handler = &proc_dointvec,
52322+ },
52323+#endif
52324+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
52325+ {
52326+ .procname = "tpe_invert",
52327+ .data = &grsec_enable_tpe_invert,
52328+ .maxlen = sizeof(int),
52329+ .mode = 0600,
52330+ .proc_handler = &proc_dointvec,
52331+ },
52332+#endif
52333+#ifdef CONFIG_GRKERNSEC_TPE_ALL
52334+ {
52335+ .procname = "tpe_restrict_all",
52336+ .data = &grsec_enable_tpe_all,
52337+ .maxlen = sizeof(int),
52338+ .mode = 0600,
52339+ .proc_handler = &proc_dointvec,
52340+ },
52341+#endif
52342+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
52343+ {
52344+ .procname = "socket_all",
52345+ .data = &grsec_enable_socket_all,
52346+ .maxlen = sizeof(int),
52347+ .mode = 0600,
52348+ .proc_handler = &proc_dointvec,
52349+ },
52350+ {
52351+ .procname = "socket_all_gid",
52352+ .data = &grsec_socket_all_gid,
52353+ .maxlen = sizeof(int),
52354+ .mode = 0600,
52355+ .proc_handler = &proc_dointvec,
52356+ },
52357+#endif
52358+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
52359+ {
52360+ .procname = "socket_client",
52361+ .data = &grsec_enable_socket_client,
52362+ .maxlen = sizeof(int),
52363+ .mode = 0600,
52364+ .proc_handler = &proc_dointvec,
52365+ },
52366+ {
52367+ .procname = "socket_client_gid",
52368+ .data = &grsec_socket_client_gid,
52369+ .maxlen = sizeof(int),
52370+ .mode = 0600,
52371+ .proc_handler = &proc_dointvec,
52372+ },
52373+#endif
52374+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
52375+ {
52376+ .procname = "socket_server",
52377+ .data = &grsec_enable_socket_server,
52378+ .maxlen = sizeof(int),
52379+ .mode = 0600,
52380+ .proc_handler = &proc_dointvec,
52381+ },
52382+ {
52383+ .procname = "socket_server_gid",
52384+ .data = &grsec_socket_server_gid,
52385+ .maxlen = sizeof(int),
52386+ .mode = 0600,
52387+ .proc_handler = &proc_dointvec,
52388+ },
52389+#endif
52390+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
52391+ {
52392+ .procname = "audit_group",
52393+ .data = &grsec_enable_group,
52394+ .maxlen = sizeof(int),
52395+ .mode = 0600,
52396+ .proc_handler = &proc_dointvec,
52397+ },
52398+ {
52399+ .procname = "audit_gid",
52400+ .data = &grsec_audit_gid,
52401+ .maxlen = sizeof(int),
52402+ .mode = 0600,
52403+ .proc_handler = &proc_dointvec,
52404+ },
52405+#endif
52406+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
52407+ {
52408+ .procname = "audit_chdir",
52409+ .data = &grsec_enable_chdir,
52410+ .maxlen = sizeof(int),
52411+ .mode = 0600,
52412+ .proc_handler = &proc_dointvec,
52413+ },
52414+#endif
52415+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
52416+ {
52417+ .procname = "audit_mount",
52418+ .data = &grsec_enable_mount,
52419+ .maxlen = sizeof(int),
52420+ .mode = 0600,
52421+ .proc_handler = &proc_dointvec,
52422+ },
52423+#endif
52424+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
52425+ {
52426+ .procname = "audit_textrel",
52427+ .data = &grsec_enable_audit_textrel,
52428+ .maxlen = sizeof(int),
52429+ .mode = 0600,
52430+ .proc_handler = &proc_dointvec,
52431+ },
52432+#endif
52433+#ifdef CONFIG_GRKERNSEC_DMESG
52434+ {
52435+ .procname = "dmesg",
52436+ .data = &grsec_enable_dmesg,
52437+ .maxlen = sizeof(int),
52438+ .mode = 0600,
52439+ .proc_handler = &proc_dointvec,
52440+ },
52441+#endif
52442+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52443+ {
52444+ .procname = "chroot_findtask",
52445+ .data = &grsec_enable_chroot_findtask,
52446+ .maxlen = sizeof(int),
52447+ .mode = 0600,
52448+ .proc_handler = &proc_dointvec,
52449+ },
52450+#endif
52451+#ifdef CONFIG_GRKERNSEC_RESLOG
52452+ {
52453+ .procname = "resource_logging",
52454+ .data = &grsec_resource_logging,
52455+ .maxlen = sizeof(int),
52456+ .mode = 0600,
52457+ .proc_handler = &proc_dointvec,
52458+ },
52459+#endif
52460+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
52461+ {
52462+ .procname = "audit_ptrace",
52463+ .data = &grsec_enable_audit_ptrace,
52464+ .maxlen = sizeof(int),
52465+ .mode = 0600,
52466+ .proc_handler = &proc_dointvec,
52467+ },
52468+#endif
52469+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52470+ {
52471+ .procname = "harden_ptrace",
52472+ .data = &grsec_enable_harden_ptrace,
52473+ .maxlen = sizeof(int),
52474+ .mode = 0600,
52475+ .proc_handler = &proc_dointvec,
52476+ },
52477+#endif
52478+ {
52479+ .procname = "grsec_lock",
52480+ .data = &grsec_lock,
52481+ .maxlen = sizeof(int),
52482+ .mode = 0600,
52483+ .proc_handler = &proc_dointvec,
52484+ },
52485+#endif
52486+#ifdef CONFIG_GRKERNSEC_ROFS
52487+ {
52488+ .procname = "romount_protect",
52489+ .data = &grsec_enable_rofs,
52490+ .maxlen = sizeof(int),
52491+ .mode = 0600,
52492+ .proc_handler = &proc_dointvec_minmax,
52493+ .extra1 = &one,
52494+ .extra2 = &one,
52495+ },
52496+#endif
52497+ { }
52498+};
52499+#endif
52500diff -urNp linux-3.0.4/grsecurity/grsec_time.c linux-3.0.4/grsecurity/grsec_time.c
52501--- linux-3.0.4/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
52502+++ linux-3.0.4/grsecurity/grsec_time.c 2011-08-23 21:48:14.000000000 -0400
52503@@ -0,0 +1,16 @@
52504+#include <linux/kernel.h>
52505+#include <linux/sched.h>
52506+#include <linux/grinternal.h>
52507+#include <linux/module.h>
52508+
52509+void
52510+gr_log_timechange(void)
52511+{
52512+#ifdef CONFIG_GRKERNSEC_TIME
52513+ if (grsec_enable_time)
52514+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
52515+#endif
52516+ return;
52517+}
52518+
52519+EXPORT_SYMBOL(gr_log_timechange);
52520diff -urNp linux-3.0.4/grsecurity/grsec_tpe.c linux-3.0.4/grsecurity/grsec_tpe.c
52521--- linux-3.0.4/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
52522+++ linux-3.0.4/grsecurity/grsec_tpe.c 2011-08-23 21:48:14.000000000 -0400
52523@@ -0,0 +1,39 @@
52524+#include <linux/kernel.h>
52525+#include <linux/sched.h>
52526+#include <linux/file.h>
52527+#include <linux/fs.h>
52528+#include <linux/grinternal.h>
52529+
52530+extern int gr_acl_tpe_check(void);
52531+
52532+int
52533+gr_tpe_allow(const struct file *file)
52534+{
52535+#ifdef CONFIG_GRKERNSEC
52536+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
52537+ const struct cred *cred = current_cred();
52538+
52539+ if (cred->uid && ((grsec_enable_tpe &&
52540+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
52541+ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
52542+ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
52543+#else
52544+ in_group_p(grsec_tpe_gid)
52545+#endif
52546+ ) || gr_acl_tpe_check()) &&
52547+ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
52548+ (inode->i_mode & S_IWOTH))))) {
52549+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
52550+ return 0;
52551+ }
52552+#ifdef CONFIG_GRKERNSEC_TPE_ALL
52553+ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
52554+ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
52555+ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
52556+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
52557+ return 0;
52558+ }
52559+#endif
52560+#endif
52561+ return 1;
52562+}
52563diff -urNp linux-3.0.4/grsecurity/grsum.c linux-3.0.4/grsecurity/grsum.c
52564--- linux-3.0.4/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
52565+++ linux-3.0.4/grsecurity/grsum.c 2011-08-23 21:48:14.000000000 -0400
52566@@ -0,0 +1,61 @@
52567+#include <linux/err.h>
52568+#include <linux/kernel.h>
52569+#include <linux/sched.h>
52570+#include <linux/mm.h>
52571+#include <linux/scatterlist.h>
52572+#include <linux/crypto.h>
52573+#include <linux/gracl.h>
52574+
52575+
52576+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
52577+#error "crypto and sha256 must be built into the kernel"
52578+#endif
52579+
52580+int
52581+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
52582+{
52583+ char *p;
52584+ struct crypto_hash *tfm;
52585+ struct hash_desc desc;
52586+ struct scatterlist sg;
52587+ unsigned char temp_sum[GR_SHA_LEN];
52588+ volatile int retval = 0;
52589+ volatile int dummy = 0;
52590+ unsigned int i;
52591+
52592+ sg_init_table(&sg, 1);
52593+
52594+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
52595+ if (IS_ERR(tfm)) {
52596+ /* should never happen, since sha256 should be built in */
52597+ return 1;
52598+ }
52599+
52600+ desc.tfm = tfm;
52601+ desc.flags = 0;
52602+
52603+ crypto_hash_init(&desc);
52604+
52605+ p = salt;
52606+ sg_set_buf(&sg, p, GR_SALT_LEN);
52607+ crypto_hash_update(&desc, &sg, sg.length);
52608+
52609+ p = entry->pw;
52610+ sg_set_buf(&sg, p, strlen(p));
52611+
52612+ crypto_hash_update(&desc, &sg, sg.length);
52613+
52614+ crypto_hash_final(&desc, temp_sum);
52615+
52616+ memset(entry->pw, 0, GR_PW_LEN);
52617+
52618+ for (i = 0; i < GR_SHA_LEN; i++)
52619+ if (sum[i] != temp_sum[i])
52620+ retval = 1;
52621+ else
52622+ dummy = 1; // waste a cycle
52623+
52624+ crypto_free_hash(tfm);
52625+
52626+ return retval;
52627+}
52628diff -urNp linux-3.0.4/grsecurity/Kconfig linux-3.0.4/grsecurity/Kconfig
52629--- linux-3.0.4/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
52630+++ linux-3.0.4/grsecurity/Kconfig 2011-09-15 00:00:57.000000000 -0400
52631@@ -0,0 +1,1038 @@
52632+#
52633+# grecurity configuration
52634+#
52635+
52636+menu "Grsecurity"
52637+
52638+config GRKERNSEC
52639+ bool "Grsecurity"
52640+ select CRYPTO
52641+ select CRYPTO_SHA256
52642+ help
52643+ If you say Y here, you will be able to configure many features
52644+ that will enhance the security of your system. It is highly
52645+ recommended that you say Y here and read through the help
52646+ for each option so that you fully understand the features and
52647+ can evaluate their usefulness for your machine.
52648+
52649+choice
52650+ prompt "Security Level"
52651+ depends on GRKERNSEC
52652+ default GRKERNSEC_CUSTOM
52653+
52654+config GRKERNSEC_LOW
52655+ bool "Low"
52656+ select GRKERNSEC_LINK
52657+ select GRKERNSEC_FIFO
52658+ select GRKERNSEC_RANDNET
52659+ select GRKERNSEC_DMESG
52660+ select GRKERNSEC_CHROOT
52661+ select GRKERNSEC_CHROOT_CHDIR
52662+
52663+ help
52664+ If you choose this option, several of the grsecurity options will
52665+ be enabled that will give you greater protection against a number
52666+ of attacks, while assuring that none of your software will have any
52667+ conflicts with the additional security measures. If you run a lot
52668+ of unusual software, or you are having problems with the higher
52669+ security levels, you should say Y here. With this option, the
52670+ following features are enabled:
52671+
52672+ - Linking restrictions
52673+ - FIFO restrictions
52674+ - Restricted dmesg
52675+ - Enforced chdir("/") on chroot
52676+ - Runtime module disabling
52677+
52678+config GRKERNSEC_MEDIUM
52679+ bool "Medium"
52680+ select PAX
52681+ select PAX_EI_PAX
52682+ select PAX_PT_PAX_FLAGS
52683+ select PAX_HAVE_ACL_FLAGS
52684+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
52685+ select GRKERNSEC_CHROOT
52686+ select GRKERNSEC_CHROOT_SYSCTL
52687+ select GRKERNSEC_LINK
52688+ select GRKERNSEC_FIFO
52689+ select GRKERNSEC_DMESG
52690+ select GRKERNSEC_RANDNET
52691+ select GRKERNSEC_FORKFAIL
52692+ select GRKERNSEC_TIME
52693+ select GRKERNSEC_SIGNAL
52694+ select GRKERNSEC_CHROOT
52695+ select GRKERNSEC_CHROOT_UNIX
52696+ select GRKERNSEC_CHROOT_MOUNT
52697+ select GRKERNSEC_CHROOT_PIVOT
52698+ select GRKERNSEC_CHROOT_DOUBLE
52699+ select GRKERNSEC_CHROOT_CHDIR
52700+ select GRKERNSEC_CHROOT_MKNOD
52701+ select GRKERNSEC_PROC
52702+ select GRKERNSEC_PROC_USERGROUP
52703+ select PAX_RANDUSTACK
52704+ select PAX_ASLR
52705+ select PAX_RANDMMAP
52706+ select PAX_REFCOUNT if (X86 || SPARC64)
52707+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
52708+
52709+ help
52710+ If you say Y here, several features in addition to those included
52711+ in the low additional security level will be enabled. These
52712+ features provide even more security to your system, though in rare
52713+ cases they may be incompatible with very old or poorly written
52714+ software. If you enable this option, make sure that your auth
52715+ service (identd) is running as gid 1001. With this option,
52716+ the following features (in addition to those provided in the
52717+ low additional security level) will be enabled:
52718+
52719+ - Failed fork logging
52720+ - Time change logging
52721+ - Signal logging
52722+ - Deny mounts in chroot
52723+ - Deny double chrooting
52724+ - Deny sysctl writes in chroot
52725+ - Deny mknod in chroot
52726+ - Deny access to abstract AF_UNIX sockets out of chroot
52727+ - Deny pivot_root in chroot
52728+ - Denied writes of /dev/kmem, /dev/mem, and /dev/port
52729+ - /proc restrictions with special GID set to 10 (usually wheel)
52730+ - Address Space Layout Randomization (ASLR)
52731+ - Prevent exploitation of most refcount overflows
52732+ - Bounds checking of copying between the kernel and userland
52733+
52734+config GRKERNSEC_HIGH
52735+ bool "High"
52736+ select GRKERNSEC_LINK
52737+ select GRKERNSEC_FIFO
52738+ select GRKERNSEC_DMESG
52739+ select GRKERNSEC_FORKFAIL
52740+ select GRKERNSEC_TIME
52741+ select GRKERNSEC_SIGNAL
52742+ select GRKERNSEC_CHROOT
52743+ select GRKERNSEC_CHROOT_SHMAT
52744+ select GRKERNSEC_CHROOT_UNIX
52745+ select GRKERNSEC_CHROOT_MOUNT
52746+ select GRKERNSEC_CHROOT_FCHDIR
52747+ select GRKERNSEC_CHROOT_PIVOT
52748+ select GRKERNSEC_CHROOT_DOUBLE
52749+ select GRKERNSEC_CHROOT_CHDIR
52750+ select GRKERNSEC_CHROOT_MKNOD
52751+ select GRKERNSEC_CHROOT_CAPS
52752+ select GRKERNSEC_CHROOT_SYSCTL
52753+ select GRKERNSEC_CHROOT_FINDTASK
52754+ select GRKERNSEC_SYSFS_RESTRICT
52755+ select GRKERNSEC_PROC
52756+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
52757+ select GRKERNSEC_HIDESYM
52758+ select GRKERNSEC_BRUTE
52759+ select GRKERNSEC_PROC_USERGROUP
52760+ select GRKERNSEC_KMEM
52761+ select GRKERNSEC_RESLOG
52762+ select GRKERNSEC_RANDNET
52763+ select GRKERNSEC_PROC_ADD
52764+ select GRKERNSEC_CHROOT_CHMOD
52765+ select GRKERNSEC_CHROOT_NICE
52766+ select GRKERNSEC_AUDIT_MOUNT
52767+ select GRKERNSEC_MODHARDEN if (MODULES)
52768+ select GRKERNSEC_HARDEN_PTRACE
52769+ select GRKERNSEC_VM86 if (X86_32)
52770+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
52771+ select PAX
52772+ select PAX_RANDUSTACK
52773+ select PAX_ASLR
52774+ select PAX_RANDMMAP
52775+ select PAX_NOEXEC
52776+ select PAX_MPROTECT
52777+ select PAX_EI_PAX
52778+ select PAX_PT_PAX_FLAGS
52779+ select PAX_HAVE_ACL_FLAGS
52780+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
52781+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
52782+ select PAX_RANDKSTACK if (X86_TSC && X86)
52783+ select PAX_SEGMEXEC if (X86_32)
52784+ select PAX_PAGEEXEC
52785+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
52786+ select PAX_EMUTRAMP if (PARISC)
52787+ select PAX_EMUSIGRT if (PARISC)
52788+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
52789+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
52790+ select PAX_REFCOUNT if (X86 || SPARC64)
52791+ select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
52792+ help
52793+ If you say Y here, many of the features of grsecurity will be
52794+ enabled, which will protect you against many kinds of attacks
52795+ against your system. The heightened security comes at a cost
52796+ of an increased chance of incompatibilities with rare software
52797+ on your machine. Since this security level enables PaX, you should
52798+ view <http://pax.grsecurity.net> and read about the PaX
52799+ project. While you are there, download chpax and run it on
52800+ binaries that cause problems with PaX. Also remember that
52801+ since the /proc restrictions are enabled, you must run your
52802+ identd as gid 1001. This security level enables the following
52803+ features in addition to those listed in the low and medium
52804+ security levels:
52805+
52806+ - Additional /proc restrictions
52807+ - Chmod restrictions in chroot
52808+ - No signals, ptrace, or viewing of processes outside of chroot
52809+ - Capability restrictions in chroot
52810+ - Deny fchdir out of chroot
52811+ - Priority restrictions in chroot
52812+ - Segmentation-based implementation of PaX
52813+ - Mprotect restrictions
52814+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
52815+ - Kernel stack randomization
52816+ - Mount/unmount/remount logging
52817+ - Kernel symbol hiding
52818+ - Prevention of memory exhaustion-based exploits
52819+ - Hardening of module auto-loading
52820+ - Ptrace restrictions
52821+ - Restricted vm86 mode
52822+ - Restricted sysfs/debugfs
52823+ - Active kernel exploit response
52824+
52825+config GRKERNSEC_CUSTOM
52826+ bool "Custom"
52827+ help
52828+ If you say Y here, you will be able to configure every grsecurity
52829+ option, which allows you to enable many more features that aren't
52830+ covered in the basic security levels. These additional features
52831+ include TPE, socket restrictions, and the sysctl system for
52832+ grsecurity. It is advised that you read through the help for
52833+ each option to determine its usefulness in your situation.
52834+
52835+endchoice
52836+
52837+menu "Address Space Protection"
52838+depends on GRKERNSEC
52839+
52840+config GRKERNSEC_KMEM
52841+ bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
52842+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
52843+ help
52844+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
52845+ be written to via mmap or otherwise to modify the running kernel.
52846+ /dev/port will also not be allowed to be opened. If you have module
52847+ support disabled, enabling this will close up four ways that are
52848+ currently used to insert malicious code into the running kernel.
52849+ Even with all these features enabled, we still highly recommend that
52850+ you use the RBAC system, as it is still possible for an attacker to
52851+ modify the running kernel through privileged I/O granted by ioperm/iopl.
52852+ If you are not using XFree86, you may be able to stop this additional
52853+ case by enabling the 'Disable privileged I/O' option. Though nothing
52854+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
52855+ but only to video memory, which is the only writing we allow in this
52856+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
52857+ not be allowed to mprotect it with PROT_WRITE later.
52858+ It is highly recommended that you say Y here if you meet all the
52859+ conditions above.
52860+
52861+config GRKERNSEC_VM86
52862+ bool "Restrict VM86 mode"
52863+ depends on X86_32
52864+
52865+ help
52866+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
52867+ make use of a special execution mode on 32bit x86 processors called
52868+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
52869+ video cards and will still work with this option enabled. The purpose
52870+ of the option is to prevent exploitation of emulation errors in
52871+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
52872+ Nearly all users should be able to enable this option.
52873+
52874+config GRKERNSEC_IO
52875+ bool "Disable privileged I/O"
52876+ depends on X86
52877+ select RTC_CLASS
52878+ select RTC_INTF_DEV
52879+ select RTC_DRV_CMOS
52880+
52881+ help
52882+ If you say Y here, all ioperm and iopl calls will return an error.
52883+ Ioperm and iopl can be used to modify the running kernel.
52884+ Unfortunately, some programs need this access to operate properly,
52885+ the most notable of which are XFree86 and hwclock. hwclock can be
52886+ remedied by having RTC support in the kernel, so real-time
52887+ clock support is enabled if this option is enabled, to ensure
52888+ that hwclock operates correctly. XFree86 still will not
52889+ operate correctly with this option enabled, so DO NOT CHOOSE Y
52890+ IF YOU USE XFree86. If you use XFree86 and you still want to
52891+ protect your kernel against modification, use the RBAC system.
52892+
52893+config GRKERNSEC_PROC_MEMMAP
52894+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
52895+ default y if (PAX_NOEXEC || PAX_ASLR)
52896+ depends on PAX_NOEXEC || PAX_ASLR
52897+ help
52898+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
52899+ give no information about the addresses of its mappings if
52900+ PaX features that rely on random addresses are enabled on the task.
52901+ If you use PaX it is greatly recommended that you say Y here as it
52902+ closes up a hole that makes the full ASLR useless for suid
52903+ binaries.
52904+
52905+config GRKERNSEC_BRUTE
52906+ bool "Deter exploit bruteforcing"
52907+ help
52908+ If you say Y here, attempts to bruteforce exploits against forking
52909+ daemons such as apache or sshd, as well as against suid/sgid binaries
52910+ will be deterred. When a child of a forking daemon is killed by PaX
52911+ or crashes due to an illegal instruction or other suspicious signal,
52912+ the parent process will be delayed 30 seconds upon every subsequent
52913+ fork until the administrator is able to assess the situation and
52914+ restart the daemon.
52915+ In the suid/sgid case, the attempt is logged, the user has all their
52916+ processes terminated, and they are prevented from executing any further
52917+ processes for 15 minutes.
52918+ It is recommended that you also enable signal logging in the auditing
52919+ section so that logs are generated when a process triggers a suspicious
52920+ signal.
52921+ If the sysctl option is enabled, a sysctl option with name
52922+ "deter_bruteforce" is created.
52923+
52924+
52925+config GRKERNSEC_MODHARDEN
52926+ bool "Harden module auto-loading"
52927+ depends on MODULES
52928+ help
52929+ If you say Y here, module auto-loading in response to use of some
52930+ feature implemented by an unloaded module will be restricted to
52931+ root users. Enabling this option helps defend against attacks
52932+ by unprivileged users who abuse the auto-loading behavior to
52933+ cause a vulnerable module to load that is then exploited.
52934+
52935+ If this option prevents a legitimate use of auto-loading for a
52936+ non-root user, the administrator can execute modprobe manually
52937+ with the exact name of the module mentioned in the alert log.
52938+ Alternatively, the administrator can add the module to the list
52939+ of modules loaded at boot by modifying init scripts.
52940+
52941+ Modification of init scripts will most likely be needed on
52942+ Ubuntu servers with encrypted home directory support enabled,
52943+ as the first non-root user logging in will cause the ecb(aes),
52944+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
52945+
52946+config GRKERNSEC_HIDESYM
52947+ bool "Hide kernel symbols"
52948+ help
52949+ If you say Y here, getting information on loaded modules, and
52950+ displaying all kernel symbols through a syscall will be restricted
52951+ to users with CAP_SYS_MODULE. For software compatibility reasons,
52952+ /proc/kallsyms will be restricted to the root user. The RBAC
52953+ system can hide that entry even from root.
52954+
52955+ This option also prevents leaking of kernel addresses through
52956+ several /proc entries.
52957+
52958+ Note that this option is only effective provided the following
52959+ conditions are met:
52960+ 1) The kernel using grsecurity is not precompiled by some distribution
52961+ 2) You have also enabled GRKERNSEC_DMESG
52962+ 3) You are using the RBAC system and hiding other files such as your
52963+ kernel image and System.map. Alternatively, enabling this option
52964+ causes the permissions on /boot, /lib/modules, and the kernel
52965+ source directory to change at compile time to prevent
52966+ reading by non-root users.
52967+ If the above conditions are met, this option will aid in providing a
52968+ useful protection against local kernel exploitation of overflows
52969+ and arbitrary read/write vulnerabilities.
52970+
52971+config GRKERNSEC_KERN_LOCKOUT
52972+ bool "Active kernel exploit response"
52973+ depends on X86 || ARM || PPC || SPARC
52974+ help
52975+ If you say Y here, when a PaX alert is triggered due to suspicious
52976+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
52977+ or an OOPs occurs due to bad memory accesses, instead of just
52978+ terminating the offending process (and potentially allowing
52979+ a subsequent exploit from the same user), we will take one of two
52980+ actions:
52981+ If the user was root, we will panic the system
52982+ If the user was non-root, we will log the attempt, terminate
52983+ all processes owned by the user, then prevent them from creating
52984+ any new processes until the system is restarted
52985+ This deters repeated kernel exploitation/bruteforcing attempts
52986+ and is useful for later forensics.
52987+
52988+endmenu
52989+menu "Role Based Access Control Options"
52990+depends on GRKERNSEC
52991+
52992+config GRKERNSEC_RBAC_DEBUG
52993+ bool
52994+
52995+config GRKERNSEC_NO_RBAC
52996+ bool "Disable RBAC system"
52997+ help
52998+ If you say Y here, the /dev/grsec device will be removed from the kernel,
52999+ preventing the RBAC system from being enabled. You should only say Y
53000+ here if you have no intention of using the RBAC system, so as to prevent
53001+ an attacker with root access from misusing the RBAC system to hide files
53002+ and processes when loadable module support and /dev/[k]mem have been
53003+ locked down.
53004+
53005+config GRKERNSEC_ACL_HIDEKERN
53006+ bool "Hide kernel processes"
53007+ help
53008+ If you say Y here, all kernel threads will be hidden to all
53009+ processes but those whose subject has the "view hidden processes"
53010+ flag.
53011+
53012+config GRKERNSEC_ACL_MAXTRIES
53013+ int "Maximum tries before password lockout"
53014+ default 3
53015+ help
53016+ This option enforces the maximum number of times a user can attempt
53017+ to authorize themselves with the grsecurity RBAC system before being
53018+ denied the ability to attempt authorization again for a specified time.
53019+ The lower the number, the harder it will be to brute-force a password.
53020+
53021+config GRKERNSEC_ACL_TIMEOUT
53022+ int "Time to wait after max password tries, in seconds"
53023+ default 30
53024+ help
53025+ This option specifies the time the user must wait after attempting to
53026+ authorize to the RBAC system with the maximum number of invalid
53027+ passwords. The higher the number, the harder it will be to brute-force
53028+ a password.
53029+
53030+endmenu
53031+menu "Filesystem Protections"
53032+depends on GRKERNSEC
53033+
53034+config GRKERNSEC_PROC
53035+ bool "Proc restrictions"
53036+ help
53037+ If you say Y here, the permissions of the /proc filesystem
53038+ will be altered to enhance system security and privacy. You MUST
53039+ choose either a user only restriction or a user and group restriction.
53040+ Depending upon the option you choose, you can either restrict users to
53041+ see only the processes they themselves run, or choose a group that can
53042+ view all processes and files normally restricted to root if you choose
53043+ the "restrict to user only" option. NOTE: If you're running identd as
53044+ a non-root user, you will have to run it as the group you specify here.
53045+
53046+config GRKERNSEC_PROC_USER
53047+ bool "Restrict /proc to user only"
53048+ depends on GRKERNSEC_PROC
53049+ help
53050+ If you say Y here, non-root users will only be able to view their own
53051+ processes, and restricts them from viewing network-related information,
53052+ and viewing kernel symbol and module information.
53053+
53054+config GRKERNSEC_PROC_USERGROUP
53055+ bool "Allow special group"
53056+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
53057+ help
53058+ If you say Y here, you will be able to select a group that will be
53059+ able to view all processes and network-related information. If you've
53060+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
53061+ remain hidden. This option is useful if you want to run identd as
53062+ a non-root user.
53063+
53064+config GRKERNSEC_PROC_GID
53065+ int "GID for special group"
53066+ depends on GRKERNSEC_PROC_USERGROUP
53067+ default 1001
53068+
53069+config GRKERNSEC_PROC_ADD
53070+ bool "Additional restrictions"
53071+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
53072+ help
53073+ If you say Y here, additional restrictions will be placed on
53074+ /proc that keep normal users from viewing device information and
53075+ slabinfo information that could be useful for exploits.
53076+
53077+config GRKERNSEC_LINK
53078+ bool "Linking restrictions"
53079+ help
53080+ If you say Y here, /tmp race exploits will be prevented, since users
53081+ will no longer be able to follow symlinks owned by other users in
53082+ world-writable +t directories (e.g. /tmp), unless the owner of the
53083+ symlink is the owner of the directory. Users will also not be
53084+ able to hardlink to files they do not own. If the sysctl option is
53085+ enabled, a sysctl option with name "linking_restrictions" is created.
53086+
53087+config GRKERNSEC_FIFO
53088+ bool "FIFO restrictions"
53089+ help
53090+ If you say Y here, users will not be able to write to FIFOs they don't
53091+ own in world-writable +t directories (e.g. /tmp), unless the owner of
53092+ the FIFO is the same owner of the directory it's held in. If the sysctl
53093+ option is enabled, a sysctl option with name "fifo_restrictions" is
53094+ created.
53095+
53096+config GRKERNSEC_SYSFS_RESTRICT
53097+ bool "Sysfs/debugfs restriction"
53098+ depends on SYSFS
53099+ help
53100+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
53101+ any filesystem normally mounted under it (e.g. debugfs) will only
53102+ be accessible by root. These filesystems generally provide access
53103+ to hardware and debug information that isn't appropriate for unprivileged
53104+ users of the system. Sysfs and debugfs have also become a large source
53105+ of new vulnerabilities, ranging from infoleaks to local compromise.
53106+ There has been very little oversight with an eye toward security involved
53107+ in adding new exporters of information to these filesystems, so their
53108+ use is discouraged.
53109+ This option is equivalent to a chmod 0700 of the mount paths.
53110+
53111+config GRKERNSEC_ROFS
53112+ bool "Runtime read-only mount protection"
53113+ help
53114+ If you say Y here, a sysctl option with name "romount_protect" will
53115+ be created. By setting this option to 1 at runtime, filesystems
53116+ will be protected in the following ways:
53117+ * No new writable mounts will be allowed
53118+ * Existing read-only mounts won't be able to be remounted read/write
53119+ * Write operations will be denied on all block devices
53120+ This option acts independently of grsec_lock: once it is set to 1,
53121+ it cannot be turned off. Therefore, please be mindful of the resulting
53122+ behavior if this option is enabled in an init script on a read-only
53123+ filesystem. This feature is mainly intended for secure embedded systems.
53124+
53125+config GRKERNSEC_CHROOT
53126+ bool "Chroot jail restrictions"
53127+ help
53128+ If you say Y here, you will be able to choose several options that will
53129+ make breaking out of a chrooted jail much more difficult. If you
53130+ encounter no software incompatibilities with the following options, it
53131+ is recommended that you enable each one.
53132+
53133+config GRKERNSEC_CHROOT_MOUNT
53134+ bool "Deny mounts"
53135+ depends on GRKERNSEC_CHROOT
53136+ help
53137+ If you say Y here, processes inside a chroot will not be able to
53138+ mount or remount filesystems. If the sysctl option is enabled, a
53139+ sysctl option with name "chroot_deny_mount" is created.
53140+
53141+config GRKERNSEC_CHROOT_DOUBLE
53142+ bool "Deny double-chroots"
53143+ depends on GRKERNSEC_CHROOT
53144+ help
53145+ If you say Y here, processes inside a chroot will not be able to chroot
53146+ again outside the chroot. This is a widely used method of breaking
53147+ out of a chroot jail and should not be allowed. If the sysctl
53148+ option is enabled, a sysctl option with name
53149+ "chroot_deny_chroot" is created.
53150+
53151+config GRKERNSEC_CHROOT_PIVOT
53152+ bool "Deny pivot_root in chroot"
53153+ depends on GRKERNSEC_CHROOT
53154+ help
53155+ If you say Y here, processes inside a chroot will not be able to use
53156+ a function called pivot_root() that was introduced in Linux 2.3.41. It
53157+ works similar to chroot in that it changes the root filesystem. This
53158+ function could be misused in a chrooted process to attempt to break out
53159+ of the chroot, and therefore should not be allowed. If the sysctl
53160+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
53161+ created.
53162+
53163+config GRKERNSEC_CHROOT_CHDIR
53164+ bool "Enforce chdir(\"/\") on all chroots"
53165+ depends on GRKERNSEC_CHROOT
53166+ help
53167+ If you say Y here, the current working directory of all newly-chrooted
53168+ applications will be set to the root directory of the chroot.
53169+ The man page on chroot(2) states:
53170+ Note that this call does not change the current working
53171+ directory, so that `.' can be outside the tree rooted at
53172+ `/'. In particular, the super-user can escape from a
53173+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
53174+
53175+ It is recommended that you say Y here, since it's not known to break
53176+ any software. If the sysctl option is enabled, a sysctl option with
53177+ name "chroot_enforce_chdir" is created.
53178+
53179+config GRKERNSEC_CHROOT_CHMOD
53180+ bool "Deny (f)chmod +s"
53181+ depends on GRKERNSEC_CHROOT
53182+ help
53183+ If you say Y here, processes inside a chroot will not be able to chmod
53184+ or fchmod files to make them have suid or sgid bits. This protects
53185+ against another published method of breaking a chroot. If the sysctl
53186+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
53187+ created.
53188+
53189+config GRKERNSEC_CHROOT_FCHDIR
53190+ bool "Deny fchdir out of chroot"
53191+ depends on GRKERNSEC_CHROOT
53192+ help
53193+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
53194+ to a file descriptor of the chrooting process that points to a directory
53195+ outside the filesystem will be stopped. If the sysctl option
53196+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
53197+
53198+config GRKERNSEC_CHROOT_MKNOD
53199+ bool "Deny mknod"
53200+ depends on GRKERNSEC_CHROOT
53201+ help
53202+ If you say Y here, processes inside a chroot will not be allowed to
53203+ mknod. The problem with using mknod inside a chroot is that it
53204+ would allow an attacker to create a device entry that is the same
53205+ as one on the physical root of your system, which could range from
53206+ anything from the console device to a device for your harddrive (which
53207+ they could then use to wipe the drive or steal data). It is recommended
53208+ that you say Y here, unless you run into software incompatibilities.
53209+ If the sysctl option is enabled, a sysctl option with name
53210+ "chroot_deny_mknod" is created.
53211+
53212+config GRKERNSEC_CHROOT_SHMAT
53213+ bool "Deny shmat() out of chroot"
53214+ depends on GRKERNSEC_CHROOT
53215+ help
53216+ If you say Y here, processes inside a chroot will not be able to attach
53217+ to shared memory segments that were created outside of the chroot jail.
53218+ It is recommended that you say Y here. If the sysctl option is enabled,
53219+ a sysctl option with name "chroot_deny_shmat" is created.
53220+
53221+config GRKERNSEC_CHROOT_UNIX
53222+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
53223+ depends on GRKERNSEC_CHROOT
53224+ help
53225+ If you say Y here, processes inside a chroot will not be able to
53226+ connect to abstract (meaning not belonging to a filesystem) Unix
53227+ domain sockets that were bound outside of a chroot. It is recommended
53228+ that you say Y here. If the sysctl option is enabled, a sysctl option
53229+ with name "chroot_deny_unix" is created.
53230+
53231+config GRKERNSEC_CHROOT_FINDTASK
53232+ bool "Protect outside processes"
53233+ depends on GRKERNSEC_CHROOT
53234+ help
53235+ If you say Y here, processes inside a chroot will not be able to
53236+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
53237+ getsid, or view any process outside of the chroot. If the sysctl
53238+ option is enabled, a sysctl option with name "chroot_findtask" is
53239+ created.
53240+
53241+config GRKERNSEC_CHROOT_NICE
53242+ bool "Restrict priority changes"
53243+ depends on GRKERNSEC_CHROOT
53244+ help
53245+ If you say Y here, processes inside a chroot will not be able to raise
53246+ the priority of processes in the chroot, or alter the priority of
53247+ processes outside the chroot. This provides more security than simply
53248+ removing CAP_SYS_NICE from the process' capability set. If the
53249+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
53250+ is created.
53251+
53252+config GRKERNSEC_CHROOT_SYSCTL
53253+ bool "Deny sysctl writes"
53254+ depends on GRKERNSEC_CHROOT
53255+ help
53256+ If you say Y here, an attacker in a chroot will not be able to
53257+ write to sysctl entries, either by sysctl(2) or through a /proc
53258+ interface. It is strongly recommended that you say Y here. If the
53259+ sysctl option is enabled, a sysctl option with name
53260+ "chroot_deny_sysctl" is created.
53261+
53262+config GRKERNSEC_CHROOT_CAPS
53263+ bool "Capability restrictions"
53264+ depends on GRKERNSEC_CHROOT
53265+ help
53266+ If you say Y here, the capabilities on all processes within a
53267+ chroot jail will be lowered to stop module insertion, raw i/o,
53268+ system and net admin tasks, rebooting the system, modifying immutable
53269+ files, modifying IPC owned by another, and changing the system time.
53270+ This is left an option because it can break some apps. Disable this
53271+ if your chrooted apps are having problems performing those kinds of
53272+ tasks. If the sysctl option is enabled, a sysctl option with
53273+ name "chroot_caps" is created.
53274+
53275+endmenu
53276+menu "Kernel Auditing"
53277+depends on GRKERNSEC
53278+
53279+config GRKERNSEC_AUDIT_GROUP
53280+ bool "Single group for auditing"
53281+ help
53282+ If you say Y here, the exec, chdir, and (un)mount logging features
53283+ will only operate on a group you specify. This option is recommended
53284+ if you only want to watch certain users instead of having a large
53285+ amount of logs from the entire system. If the sysctl option is enabled,
53286+ a sysctl option with name "audit_group" is created.
53287+
53288+config GRKERNSEC_AUDIT_GID
53289+ int "GID for auditing"
53290+ depends on GRKERNSEC_AUDIT_GROUP
53291+ default 1007
53292+
53293+config GRKERNSEC_EXECLOG
53294+ bool "Exec logging"
53295+ help
53296+ If you say Y here, all execve() calls will be logged (since the
53297+ other exec*() calls are frontends to execve(), all execution
53298+ will be logged). Useful for shell-servers that like to keep track
53299+ of their users. If the sysctl option is enabled, a sysctl option with
53300+ name "exec_logging" is created.
53301+ WARNING: This option when enabled will produce a LOT of logs, especially
53302+ on an active system.
53303+
53304+config GRKERNSEC_RESLOG
53305+ bool "Resource logging"
53306+ help
53307+ If you say Y here, all attempts to overstep resource limits will
53308+ be logged with the resource name, the requested size, and the current
53309+ limit. It is highly recommended that you say Y here. If the sysctl
53310+ option is enabled, a sysctl option with name "resource_logging" is
53311+ created. If the RBAC system is enabled, the sysctl value is ignored.
53312+
53313+config GRKERNSEC_CHROOT_EXECLOG
53314+ bool "Log execs within chroot"
53315+ help
53316+ If you say Y here, all executions inside a chroot jail will be logged
53317+ to syslog. This can cause a large amount of logs if certain
53318+ applications (eg. djb's daemontools) are installed on the system, and
53319+ is therefore left as an option. If the sysctl option is enabled, a
53320+ sysctl option with name "chroot_execlog" is created.
53321+
53322+config GRKERNSEC_AUDIT_PTRACE
53323+ bool "Ptrace logging"
53324+ help
53325+ If you say Y here, all attempts to attach to a process via ptrace
53326+ will be logged. If the sysctl option is enabled, a sysctl option
53327+ with name "audit_ptrace" is created.
53328+
53329+config GRKERNSEC_AUDIT_CHDIR
53330+ bool "Chdir logging"
53331+ help
53332+ If you say Y here, all chdir() calls will be logged. If the sysctl
53333+ option is enabled, a sysctl option with name "audit_chdir" is created.
53334+
53335+config GRKERNSEC_AUDIT_MOUNT
53336+ bool "(Un)Mount logging"
53337+ help
53338+ If you say Y here, all mounts and unmounts will be logged. If the
53339+ sysctl option is enabled, a sysctl option with name "audit_mount" is
53340+ created.
53341+
53342+config GRKERNSEC_SIGNAL
53343+ bool "Signal logging"
53344+ help
53345+ If you say Y here, certain important signals will be logged, such as
53346+ SIGSEGV, which will as a result inform you of when an error in a program
53347+ occurred, which in some cases could mean a possible exploit attempt.
53348+ If the sysctl option is enabled, a sysctl option with name
53349+ "signal_logging" is created.
53350+
53351+config GRKERNSEC_FORKFAIL
53352+ bool "Fork failure logging"
53353+ help
53354+ If you say Y here, all failed fork() attempts will be logged.
53355+ This could suggest a fork bomb, or someone attempting to overstep
53356+ their process limit. If the sysctl option is enabled, a sysctl option
53357+ with name "forkfail_logging" is created.
53358+
53359+config GRKERNSEC_TIME
53360+ bool "Time change logging"
53361+ help
53362+ If you say Y here, any changes of the system clock will be logged.
53363+ If the sysctl option is enabled, a sysctl option with name
53364+ "timechange_logging" is created.
53365+
53366+config GRKERNSEC_PROC_IPADDR
53367+ bool "/proc/<pid>/ipaddr support"
53368+ help
53369+ If you say Y here, a new entry will be added to each /proc/<pid>
53370+ directory that contains the IP address of the person using the task.
53371+ The IP is carried across local TCP and AF_UNIX stream sockets.
53372+ This information can be useful for IDS/IPSes to perform remote response
53373+ to a local attack. The entry is readable by only the owner of the
53374+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
53375+ the RBAC system), and thus does not create privacy concerns.
53376+
53377+config GRKERNSEC_RWXMAP_LOG
53378+ bool 'Denied RWX mmap/mprotect logging'
53379+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
53380+ help
53381+ If you say Y here, calls to mmap() and mprotect() with explicit
53382+ usage of PROT_WRITE and PROT_EXEC together will be logged when
53383+ denied by the PAX_MPROTECT feature. If the sysctl option is
53384+ enabled, a sysctl option with name "rwxmap_logging" is created.
53385+
53386+config GRKERNSEC_AUDIT_TEXTREL
53387+ bool 'ELF text relocations logging (READ HELP)'
53388+ depends on PAX_MPROTECT
53389+ help
53390+ If you say Y here, text relocations will be logged with the filename
53391+ of the offending library or binary. The purpose of the feature is
53392+ to help Linux distribution developers get rid of libraries and
53393+ binaries that need text relocations which hinder the future progress
53394+ of PaX. Only Linux distribution developers should say Y here, and
53395+ never on a production machine, as this option creates an information
53396+ leak that could aid an attacker in defeating the randomization of
53397+ a single memory region. If the sysctl option is enabled, a sysctl
53398+ option with name "audit_textrel" is created.
53399+
53400+endmenu
53401+
53402+menu "Executable Protections"
53403+depends on GRKERNSEC
53404+
53405+config GRKERNSEC_DMESG
53406+ bool "Dmesg(8) restriction"
53407+ help
53408+ If you say Y here, non-root users will not be able to use dmesg(8)
53409+ to view up to the last 4kb of messages in the kernel's log buffer.
53410+ The kernel's log buffer often contains kernel addresses and other
53411+ identifying information useful to an attacker in fingerprinting a
53412+ system for a targeted exploit.
53413+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
53414+ created.
53415+
53416+config GRKERNSEC_HARDEN_PTRACE
53417+ bool "Deter ptrace-based process snooping"
53418+ help
53419+ If you say Y here, TTY sniffers and other malicious monitoring
53420+ programs implemented through ptrace will be defeated. If you
53421+ have been using the RBAC system, this option has already been
53422+ enabled for several years for all users, with the ability to make
53423+ fine-grained exceptions.
53424+
53425+ This option only affects the ability of non-root users to ptrace
53426+ processes that are not a descendent of the ptracing process.
53427+ This means that strace ./binary and gdb ./binary will still work,
53428+ but attaching to arbitrary processes will not. If the sysctl
53429+ option is enabled, a sysctl option with name "harden_ptrace" is
53430+ created.
53431+
53432+config GRKERNSEC_TPE
53433+ bool "Trusted Path Execution (TPE)"
53434+ help
53435+ If you say Y here, you will be able to choose a gid to add to the
53436+ supplementary groups of users you want to mark as "untrusted."
53437+ These users will not be able to execute any files that are not in
53438+ root-owned directories writable only by root. If the sysctl option
53439+ is enabled, a sysctl option with name "tpe" is created.
53440+
53441+config GRKERNSEC_TPE_ALL
53442+ bool "Partially restrict all non-root users"
53443+ depends on GRKERNSEC_TPE
53444+ help
53445+ If you say Y here, all non-root users will be covered under
53446+ a weaker TPE restriction. This is separate from, and in addition to,
53447+ the main TPE options that you have selected elsewhere. Thus, if a
53448+ "trusted" GID is chosen, this restriction applies to even that GID.
53449+ Under this restriction, all non-root users will only be allowed to
53450+ execute files in directories they own that are not group or
53451+ world-writable, or in directories owned by root and writable only by
53452+ root. If the sysctl option is enabled, a sysctl option with name
53453+ "tpe_restrict_all" is created.
53454+
53455+config GRKERNSEC_TPE_INVERT
53456+ bool "Invert GID option"
53457+ depends on GRKERNSEC_TPE
53458+ help
53459+ If you say Y here, the group you specify in the TPE configuration will
53460+ decide what group TPE restrictions will be *disabled* for. This
53461+ option is useful if you want TPE restrictions to be applied to most
53462+ users on the system. If the sysctl option is enabled, a sysctl option
53463+ with name "tpe_invert" is created. Unlike other sysctl options, this
53464+ entry will default to on for backward-compatibility.
53465+
53466+config GRKERNSEC_TPE_GID
53467+ int "GID for untrusted users"
53468+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
53469+ default 1005
53470+ help
53471+ Setting this GID determines what group TPE restrictions will be
53472+ *enabled* for. If the sysctl option is enabled, a sysctl option
53473+ with name "tpe_gid" is created.
53474+
53475+config GRKERNSEC_TPE_GID
53476+ int "GID for trusted users"
53477+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
53478+ default 1005
53479+ help
53480+ Setting this GID determines what group TPE restrictions will be
53481+ *disabled* for. If the sysctl option is enabled, a sysctl option
53482+ with name "tpe_gid" is created.
53483+
53484+endmenu
53485+menu "Network Protections"
53486+depends on GRKERNSEC
53487+
53488+config GRKERNSEC_RANDNET
53489+ bool "Larger entropy pools"
53490+ help
53491+ If you say Y here, the entropy pools used for many features of Linux
53492+ and grsecurity will be doubled in size. Since several grsecurity
53493+ features use additional randomness, it is recommended that you say Y
53494+ here. Saying Y here has a similar effect as modifying
53495+ /proc/sys/kernel/random/poolsize.
53496+
53497+config GRKERNSEC_BLACKHOLE
53498+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
53499+ depends on NET
53500+ help
53501+ If you say Y here, neither TCP resets nor ICMP
53502+ destination-unreachable packets will be sent in response to packets
53503+ sent to ports for which no associated listening process exists.
53504+ This feature supports both IPV4 and IPV6 and exempts the
53505+ loopback interface from blackholing. Enabling this feature
53506+ makes a host more resilient to DoS attacks and reduces network
53507+ visibility against scanners.
53508+
53509+ The blackhole feature as-implemented is equivalent to the FreeBSD
53510+ blackhole feature, as it prevents RST responses to all packets, not
53511+ just SYNs. Under most application behavior this causes no
53512+ problems, but applications (like haproxy) may not close certain
53513+ connections in a way that cleanly terminates them on the remote
53514+ end, leaving the remote host in LAST_ACK state. Because of this
53515+ side-effect and to prevent intentional LAST_ACK DoSes, this
53516+ feature also adds automatic mitigation against such attacks.
53517+ The mitigation drastically reduces the amount of time a socket
53518+ can spend in LAST_ACK state. If you're using haproxy and not
53519+ all servers it connects to have this option enabled, consider
53520+ disabling this feature on the haproxy host.
53521+
53522+ If the sysctl option is enabled, two sysctl options with names
53523+ "ip_blackhole" and "lastack_retries" will be created.
53524+ While "ip_blackhole" takes the standard zero/non-zero on/off
53525+ toggle, "lastack_retries" uses the same kinds of values as
53526+ "tcp_retries1" and "tcp_retries2". The default value of 4
53527+ prevents a socket from lasting more than 45 seconds in LAST_ACK
53528+ state.
53529+
53530+config GRKERNSEC_SOCKET
53531+ bool "Socket restrictions"
53532+ depends on NET
53533+ help
53534+ If you say Y here, you will be able to choose from several options.
53535+ If you assign a GID on your system and add it to the supplementary
53536+ groups of users you want to restrict socket access to, this patch
53537+ will perform up to three things, based on the option(s) you choose.
53538+
53539+config GRKERNSEC_SOCKET_ALL
53540+ bool "Deny any sockets to group"
53541+ depends on GRKERNSEC_SOCKET
53542+ help
53543+ If you say Y here, you will be able to choose a GID whose users will
53544+ be unable to connect to other hosts from your machine or run server
53545+ applications from your machine. If the sysctl option is enabled, a
53546+ sysctl option with name "socket_all" is created.
53547+
53548+config GRKERNSEC_SOCKET_ALL_GID
53549+ int "GID to deny all sockets for"
53550+ depends on GRKERNSEC_SOCKET_ALL
53551+ default 1004
53552+ help
53553+ Here you can choose the GID to disable socket access for. Remember to
53554+ add the users you want socket access disabled for to the GID
53555+ specified here. If the sysctl option is enabled, a sysctl option
53556+ with name "socket_all_gid" is created.
53557+
53558+config GRKERNSEC_SOCKET_CLIENT
53559+ bool "Deny client sockets to group"
53560+ depends on GRKERNSEC_SOCKET
53561+ help
53562+ If you say Y here, you will be able to choose a GID whose users will
53563+ be unable to connect to other hosts from your machine, but will be
53564+ able to run servers. If this option is enabled, all users in the group
53565+ you specify will have to use passive mode when initiating ftp transfers
53566+ from the shell on your machine. If the sysctl option is enabled, a
53567+ sysctl option with name "socket_client" is created.
53568+
53569+config GRKERNSEC_SOCKET_CLIENT_GID
53570+ int "GID to deny client sockets for"
53571+ depends on GRKERNSEC_SOCKET_CLIENT
53572+ default 1003
53573+ help
53574+ Here you can choose the GID to disable client socket access for.
53575+ Remember to add the users you want client socket access disabled for to
53576+ the GID specified here. If the sysctl option is enabled, a sysctl
53577+ option with name "socket_client_gid" is created.
53578+
53579+config GRKERNSEC_SOCKET_SERVER
53580+ bool "Deny server sockets to group"
53581+ depends on GRKERNSEC_SOCKET
53582+ help
53583+ If you say Y here, you will be able to choose a GID whose users will
53584+ be unable to run server applications from your machine. If the sysctl
53585+ option is enabled, a sysctl option with name "socket_server" is created.
53586+
53587+config GRKERNSEC_SOCKET_SERVER_GID
53588+ int "GID to deny server sockets for"
53589+ depends on GRKERNSEC_SOCKET_SERVER
53590+ default 1002
53591+ help
53592+ Here you can choose the GID to disable server socket access for.
53593+ Remember to add the users you want server socket access disabled for to
53594+ the GID specified here. If the sysctl option is enabled, a sysctl
53595+ option with name "socket_server_gid" is created.
53596+
53597+endmenu
53598+menu "Sysctl support"
53599+depends on GRKERNSEC && SYSCTL
53600+
53601+config GRKERNSEC_SYSCTL
53602+ bool "Sysctl support"
53603+ help
53604+ If you say Y here, you will be able to change the options that
53605+ grsecurity runs with at bootup, without having to recompile your
53606+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
53607+ to enable (1) or disable (0) various features. All the sysctl entries
53608+ are mutable until the "grsec_lock" entry is set to a non-zero value.
53609+ All features enabled in the kernel configuration are disabled at boot
53610+ if you do not say Y to the "Turn on features by default" option.
53611+ All options should be set at startup, and the grsec_lock entry should
53612+ be set to a non-zero value after all the options are set.
53613+ *THIS IS EXTREMELY IMPORTANT*
53614+
53615+config GRKERNSEC_SYSCTL_DISTRO
53616+ bool "Extra sysctl support for distro makers (READ HELP)"
53617+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
53618+ help
53619+ If you say Y here, additional sysctl options will be created
53620+ for features that affect processes running as root. Therefore,
53621+ it is critical when using this option that the grsec_lock entry be
53622+ enabled after boot. Only distros with prebuilt kernel packages
53623+ with this option enabled that can ensure grsec_lock is enabled
53624+ after boot should use this option.
53625+ *Failure to set grsec_lock after boot makes all grsec features
53626+ this option covers useless*
53627+
53628+ Currently this option creates the following sysctl entries:
53629+ "Disable Privileged I/O": "disable_priv_io"
53630+
53631+config GRKERNSEC_SYSCTL_ON
53632+ bool "Turn on features by default"
53633+ depends on GRKERNSEC_SYSCTL
53634+ help
53635+ If you say Y here, instead of having all features enabled in the
53636+ kernel configuration disabled at boot time, the features will be
53637+ enabled at boot time. It is recommended you say Y here unless
53638+ there is some reason you would want all sysctl-tunable features to
53639+ be disabled by default. As mentioned elsewhere, it is important
53640+ to enable the grsec_lock entry once you have finished modifying
53641+ the sysctl entries.
53642+
53643+endmenu
53644+menu "Logging Options"
53645+depends on GRKERNSEC
53646+
53647+config GRKERNSEC_FLOODTIME
53648+ int "Seconds in between log messages (minimum)"
53649+ default 10
53650+ help
53651+ This option allows you to enforce the number of seconds between
53652+ grsecurity log messages. The default should be suitable for most
53653+ people, however, if you choose to change it, choose a value small enough
53654+ to allow informative logs to be produced, but large enough to
53655+ prevent flooding.
53656+
53657+config GRKERNSEC_FLOODBURST
53658+ int "Number of messages in a burst (maximum)"
53659+ default 6
53660+ help
53661+ This option allows you to choose the maximum number of messages allowed
53662+ within the flood time interval you chose in a separate option. The
53663+ default should be suitable for most people, however if you find that
53664+ many of your logs are being interpreted as flooding, you may want to
53665+ raise this value.
53666+
53667+endmenu
53668+
53669+endmenu
53670diff -urNp linux-3.0.4/grsecurity/Makefile linux-3.0.4/grsecurity/Makefile
53671--- linux-3.0.4/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
53672+++ linux-3.0.4/grsecurity/Makefile 2011-09-14 23:29:56.000000000 -0400
53673@@ -0,0 +1,35 @@
53674+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
53675+# during 2001-2009 it has been completely redesigned by Brad Spengler
53676+# into an RBAC system
53677+#
53678+# All code in this directory and various hooks inserted throughout the kernel
53679+# are copyright Brad Spengler - Open Source Security, Inc., and released
53680+# under the GPL v2 or higher
53681+
53682+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
53683+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
53684+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
53685+
53686+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
53687+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
53688+ gracl_learn.o grsec_log.o
53689+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
53690+
53691+ifdef CONFIG_NET
53692+obj-y += grsec_sock.o
53693+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
53694+endif
53695+
53696+ifndef CONFIG_GRKERNSEC
53697+obj-y += grsec_disabled.o
53698+endif
53699+
53700+ifdef CONFIG_GRKERNSEC_HIDESYM
53701+extra-y := grsec_hidesym.o
53702+$(obj)/grsec_hidesym.o:
53703+ @-chmod -f 500 /boot
53704+ @-chmod -f 500 /lib/modules
53705+ @-chmod -f 500 /lib64/modules
53706+ @-chmod -f 700 .
53707+ @echo ' grsec: protected kernel image paths'
53708+endif
53709diff -urNp linux-3.0.4/include/acpi/acpi_bus.h linux-3.0.4/include/acpi/acpi_bus.h
53710--- linux-3.0.4/include/acpi/acpi_bus.h 2011-07-21 22:17:23.000000000 -0400
53711+++ linux-3.0.4/include/acpi/acpi_bus.h 2011-08-23 21:47:56.000000000 -0400
53712@@ -107,7 +107,7 @@ struct acpi_device_ops {
53713 acpi_op_bind bind;
53714 acpi_op_unbind unbind;
53715 acpi_op_notify notify;
53716-};
53717+} __no_const;
53718
53719 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
53720
53721diff -urNp linux-3.0.4/include/asm-generic/atomic-long.h linux-3.0.4/include/asm-generic/atomic-long.h
53722--- linux-3.0.4/include/asm-generic/atomic-long.h 2011-07-21 22:17:23.000000000 -0400
53723+++ linux-3.0.4/include/asm-generic/atomic-long.h 2011-08-23 21:47:56.000000000 -0400
53724@@ -22,6 +22,12 @@
53725
53726 typedef atomic64_t atomic_long_t;
53727
53728+#ifdef CONFIG_PAX_REFCOUNT
53729+typedef atomic64_unchecked_t atomic_long_unchecked_t;
53730+#else
53731+typedef atomic64_t atomic_long_unchecked_t;
53732+#endif
53733+
53734 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
53735
53736 static inline long atomic_long_read(atomic_long_t *l)
53737@@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
53738 return (long)atomic64_read(v);
53739 }
53740
53741+#ifdef CONFIG_PAX_REFCOUNT
53742+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
53743+{
53744+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53745+
53746+ return (long)atomic64_read_unchecked(v);
53747+}
53748+#endif
53749+
53750 static inline void atomic_long_set(atomic_long_t *l, long i)
53751 {
53752 atomic64_t *v = (atomic64_t *)l;
53753@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
53754 atomic64_set(v, i);
53755 }
53756
53757+#ifdef CONFIG_PAX_REFCOUNT
53758+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
53759+{
53760+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53761+
53762+ atomic64_set_unchecked(v, i);
53763+}
53764+#endif
53765+
53766 static inline void atomic_long_inc(atomic_long_t *l)
53767 {
53768 atomic64_t *v = (atomic64_t *)l;
53769@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
53770 atomic64_inc(v);
53771 }
53772
53773+#ifdef CONFIG_PAX_REFCOUNT
53774+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
53775+{
53776+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53777+
53778+ atomic64_inc_unchecked(v);
53779+}
53780+#endif
53781+
53782 static inline void atomic_long_dec(atomic_long_t *l)
53783 {
53784 atomic64_t *v = (atomic64_t *)l;
53785@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
53786 atomic64_dec(v);
53787 }
53788
53789+#ifdef CONFIG_PAX_REFCOUNT
53790+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
53791+{
53792+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53793+
53794+ atomic64_dec_unchecked(v);
53795+}
53796+#endif
53797+
53798 static inline void atomic_long_add(long i, atomic_long_t *l)
53799 {
53800 atomic64_t *v = (atomic64_t *)l;
53801@@ -59,6 +101,15 @@ static inline void atomic_long_add(long
53802 atomic64_add(i, v);
53803 }
53804
53805+#ifdef CONFIG_PAX_REFCOUNT
53806+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
53807+{
53808+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53809+
53810+ atomic64_add_unchecked(i, v);
53811+}
53812+#endif
53813+
53814 static inline void atomic_long_sub(long i, atomic_long_t *l)
53815 {
53816 atomic64_t *v = (atomic64_t *)l;
53817@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long
53818 atomic64_sub(i, v);
53819 }
53820
53821+#ifdef CONFIG_PAX_REFCOUNT
53822+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
53823+{
53824+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53825+
53826+ atomic64_sub_unchecked(i, v);
53827+}
53828+#endif
53829+
53830 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
53831 {
53832 atomic64_t *v = (atomic64_t *)l;
53833@@ -115,6 +175,15 @@ static inline long atomic_long_inc_retur
53834 return (long)atomic64_inc_return(v);
53835 }
53836
53837+#ifdef CONFIG_PAX_REFCOUNT
53838+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
53839+{
53840+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53841+
53842+ return (long)atomic64_inc_return_unchecked(v);
53843+}
53844+#endif
53845+
53846 static inline long atomic_long_dec_return(atomic_long_t *l)
53847 {
53848 atomic64_t *v = (atomic64_t *)l;
53849@@ -140,6 +209,12 @@ static inline long atomic_long_add_unles
53850
53851 typedef atomic_t atomic_long_t;
53852
53853+#ifdef CONFIG_PAX_REFCOUNT
53854+typedef atomic_unchecked_t atomic_long_unchecked_t;
53855+#else
53856+typedef atomic_t atomic_long_unchecked_t;
53857+#endif
53858+
53859 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
53860 static inline long atomic_long_read(atomic_long_t *l)
53861 {
53862@@ -148,6 +223,15 @@ static inline long atomic_long_read(atom
53863 return (long)atomic_read(v);
53864 }
53865
53866+#ifdef CONFIG_PAX_REFCOUNT
53867+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
53868+{
53869+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53870+
53871+ return (long)atomic_read_unchecked(v);
53872+}
53873+#endif
53874+
53875 static inline void atomic_long_set(atomic_long_t *l, long i)
53876 {
53877 atomic_t *v = (atomic_t *)l;
53878@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomi
53879 atomic_set(v, i);
53880 }
53881
53882+#ifdef CONFIG_PAX_REFCOUNT
53883+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
53884+{
53885+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53886+
53887+ atomic_set_unchecked(v, i);
53888+}
53889+#endif
53890+
53891 static inline void atomic_long_inc(atomic_long_t *l)
53892 {
53893 atomic_t *v = (atomic_t *)l;
53894@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomi
53895 atomic_inc(v);
53896 }
53897
53898+#ifdef CONFIG_PAX_REFCOUNT
53899+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
53900+{
53901+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53902+
53903+ atomic_inc_unchecked(v);
53904+}
53905+#endif
53906+
53907 static inline void atomic_long_dec(atomic_long_t *l)
53908 {
53909 atomic_t *v = (atomic_t *)l;
53910@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomi
53911 atomic_dec(v);
53912 }
53913
53914+#ifdef CONFIG_PAX_REFCOUNT
53915+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
53916+{
53917+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53918+
53919+ atomic_dec_unchecked(v);
53920+}
53921+#endif
53922+
53923 static inline void atomic_long_add(long i, atomic_long_t *l)
53924 {
53925 atomic_t *v = (atomic_t *)l;
53926@@ -176,6 +287,15 @@ static inline void atomic_long_add(long
53927 atomic_add(i, v);
53928 }
53929
53930+#ifdef CONFIG_PAX_REFCOUNT
53931+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
53932+{
53933+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53934+
53935+ atomic_add_unchecked(i, v);
53936+}
53937+#endif
53938+
53939 static inline void atomic_long_sub(long i, atomic_long_t *l)
53940 {
53941 atomic_t *v = (atomic_t *)l;
53942@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long
53943 atomic_sub(i, v);
53944 }
53945
53946+#ifdef CONFIG_PAX_REFCOUNT
53947+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
53948+{
53949+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53950+
53951+ atomic_sub_unchecked(i, v);
53952+}
53953+#endif
53954+
53955 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
53956 {
53957 atomic_t *v = (atomic_t *)l;
53958@@ -232,6 +361,15 @@ static inline long atomic_long_inc_retur
53959 return (long)atomic_inc_return(v);
53960 }
53961
53962+#ifdef CONFIG_PAX_REFCOUNT
53963+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
53964+{
53965+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53966+
53967+ return (long)atomic_inc_return_unchecked(v);
53968+}
53969+#endif
53970+
53971 static inline long atomic_long_dec_return(atomic_long_t *l)
53972 {
53973 atomic_t *v = (atomic_t *)l;
53974@@ -255,4 +393,49 @@ static inline long atomic_long_add_unles
53975
53976 #endif /* BITS_PER_LONG == 64 */
53977
53978+#ifdef CONFIG_PAX_REFCOUNT
53979+static inline void pax_refcount_needs_these_functions(void)
53980+{
53981+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
53982+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
53983+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
53984+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
53985+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
53986+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
53987+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
53988+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
53989+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
53990+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
53991+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
53992+
53993+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
53994+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
53995+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
53996+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
53997+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
53998+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
53999+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
54000+}
54001+#else
54002+#define atomic_read_unchecked(v) atomic_read(v)
54003+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
54004+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
54005+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
54006+#define atomic_inc_unchecked(v) atomic_inc(v)
54007+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
54008+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
54009+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
54010+#define atomic_dec_unchecked(v) atomic_dec(v)
54011+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
54012+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
54013+
54014+#define atomic_long_read_unchecked(v) atomic_long_read(v)
54015+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
54016+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
54017+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
54018+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
54019+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
54020+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
54021+#endif
54022+
54023 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
54024diff -urNp linux-3.0.4/include/asm-generic/cache.h linux-3.0.4/include/asm-generic/cache.h
54025--- linux-3.0.4/include/asm-generic/cache.h 2011-07-21 22:17:23.000000000 -0400
54026+++ linux-3.0.4/include/asm-generic/cache.h 2011-08-23 21:47:56.000000000 -0400
54027@@ -6,7 +6,7 @@
54028 * cache lines need to provide their own cache.h.
54029 */
54030
54031-#define L1_CACHE_SHIFT 5
54032-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
54033+#define L1_CACHE_SHIFT 5UL
54034+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
54035
54036 #endif /* __ASM_GENERIC_CACHE_H */
54037diff -urNp linux-3.0.4/include/asm-generic/int-l64.h linux-3.0.4/include/asm-generic/int-l64.h
54038--- linux-3.0.4/include/asm-generic/int-l64.h 2011-07-21 22:17:23.000000000 -0400
54039+++ linux-3.0.4/include/asm-generic/int-l64.h 2011-08-23 21:47:56.000000000 -0400
54040@@ -46,6 +46,8 @@ typedef unsigned int u32;
54041 typedef signed long s64;
54042 typedef unsigned long u64;
54043
54044+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
54045+
54046 #define S8_C(x) x
54047 #define U8_C(x) x ## U
54048 #define S16_C(x) x
54049diff -urNp linux-3.0.4/include/asm-generic/int-ll64.h linux-3.0.4/include/asm-generic/int-ll64.h
54050--- linux-3.0.4/include/asm-generic/int-ll64.h 2011-07-21 22:17:23.000000000 -0400
54051+++ linux-3.0.4/include/asm-generic/int-ll64.h 2011-08-23 21:47:56.000000000 -0400
54052@@ -51,6 +51,8 @@ typedef unsigned int u32;
54053 typedef signed long long s64;
54054 typedef unsigned long long u64;
54055
54056+typedef unsigned long long intoverflow_t;
54057+
54058 #define S8_C(x) x
54059 #define U8_C(x) x ## U
54060 #define S16_C(x) x
54061diff -urNp linux-3.0.4/include/asm-generic/kmap_types.h linux-3.0.4/include/asm-generic/kmap_types.h
54062--- linux-3.0.4/include/asm-generic/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
54063+++ linux-3.0.4/include/asm-generic/kmap_types.h 2011-08-23 21:47:56.000000000 -0400
54064@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
54065 KMAP_D(17) KM_NMI,
54066 KMAP_D(18) KM_NMI_PTE,
54067 KMAP_D(19) KM_KDB,
54068+KMAP_D(20) KM_CLEARPAGE,
54069 /*
54070 * Remember to update debug_kmap_atomic() when adding new kmap types!
54071 */
54072-KMAP_D(20) KM_TYPE_NR
54073+KMAP_D(21) KM_TYPE_NR
54074 };
54075
54076 #undef KMAP_D
54077diff -urNp linux-3.0.4/include/asm-generic/pgtable.h linux-3.0.4/include/asm-generic/pgtable.h
54078--- linux-3.0.4/include/asm-generic/pgtable.h 2011-07-21 22:17:23.000000000 -0400
54079+++ linux-3.0.4/include/asm-generic/pgtable.h 2011-08-23 21:47:56.000000000 -0400
54080@@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
54081 #endif /* __HAVE_ARCH_PMD_WRITE */
54082 #endif
54083
54084+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
54085+static inline unsigned long pax_open_kernel(void) { return 0; }
54086+#endif
54087+
54088+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
54089+static inline unsigned long pax_close_kernel(void) { return 0; }
54090+#endif
54091+
54092 #endif /* !__ASSEMBLY__ */
54093
54094 #endif /* _ASM_GENERIC_PGTABLE_H */
54095diff -urNp linux-3.0.4/include/asm-generic/pgtable-nopmd.h linux-3.0.4/include/asm-generic/pgtable-nopmd.h
54096--- linux-3.0.4/include/asm-generic/pgtable-nopmd.h 2011-07-21 22:17:23.000000000 -0400
54097+++ linux-3.0.4/include/asm-generic/pgtable-nopmd.h 2011-08-23 21:47:56.000000000 -0400
54098@@ -1,14 +1,19 @@
54099 #ifndef _PGTABLE_NOPMD_H
54100 #define _PGTABLE_NOPMD_H
54101
54102-#ifndef __ASSEMBLY__
54103-
54104 #include <asm-generic/pgtable-nopud.h>
54105
54106-struct mm_struct;
54107-
54108 #define __PAGETABLE_PMD_FOLDED
54109
54110+#define PMD_SHIFT PUD_SHIFT
54111+#define PTRS_PER_PMD 1
54112+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
54113+#define PMD_MASK (~(PMD_SIZE-1))
54114+
54115+#ifndef __ASSEMBLY__
54116+
54117+struct mm_struct;
54118+
54119 /*
54120 * Having the pmd type consist of a pud gets the size right, and allows
54121 * us to conceptually access the pud entry that this pmd is folded into
54122@@ -16,11 +21,6 @@ struct mm_struct;
54123 */
54124 typedef struct { pud_t pud; } pmd_t;
54125
54126-#define PMD_SHIFT PUD_SHIFT
54127-#define PTRS_PER_PMD 1
54128-#define PMD_SIZE (1UL << PMD_SHIFT)
54129-#define PMD_MASK (~(PMD_SIZE-1))
54130-
54131 /*
54132 * The "pud_xxx()" functions here are trivial for a folded two-level
54133 * setup: the pmd is never bad, and a pmd always exists (as it's folded
54134diff -urNp linux-3.0.4/include/asm-generic/pgtable-nopud.h linux-3.0.4/include/asm-generic/pgtable-nopud.h
54135--- linux-3.0.4/include/asm-generic/pgtable-nopud.h 2011-07-21 22:17:23.000000000 -0400
54136+++ linux-3.0.4/include/asm-generic/pgtable-nopud.h 2011-08-23 21:47:56.000000000 -0400
54137@@ -1,10 +1,15 @@
54138 #ifndef _PGTABLE_NOPUD_H
54139 #define _PGTABLE_NOPUD_H
54140
54141-#ifndef __ASSEMBLY__
54142-
54143 #define __PAGETABLE_PUD_FOLDED
54144
54145+#define PUD_SHIFT PGDIR_SHIFT
54146+#define PTRS_PER_PUD 1
54147+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
54148+#define PUD_MASK (~(PUD_SIZE-1))
54149+
54150+#ifndef __ASSEMBLY__
54151+
54152 /*
54153 * Having the pud type consist of a pgd gets the size right, and allows
54154 * us to conceptually access the pgd entry that this pud is folded into
54155@@ -12,11 +17,6 @@
54156 */
54157 typedef struct { pgd_t pgd; } pud_t;
54158
54159-#define PUD_SHIFT PGDIR_SHIFT
54160-#define PTRS_PER_PUD 1
54161-#define PUD_SIZE (1UL << PUD_SHIFT)
54162-#define PUD_MASK (~(PUD_SIZE-1))
54163-
54164 /*
54165 * The "pgd_xxx()" functions here are trivial for a folded two-level
54166 * setup: the pud is never bad, and a pud always exists (as it's folded
54167diff -urNp linux-3.0.4/include/asm-generic/vmlinux.lds.h linux-3.0.4/include/asm-generic/vmlinux.lds.h
54168--- linux-3.0.4/include/asm-generic/vmlinux.lds.h 2011-07-21 22:17:23.000000000 -0400
54169+++ linux-3.0.4/include/asm-generic/vmlinux.lds.h 2011-08-23 21:47:56.000000000 -0400
54170@@ -217,6 +217,7 @@
54171 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
54172 VMLINUX_SYMBOL(__start_rodata) = .; \
54173 *(.rodata) *(.rodata.*) \
54174+ *(.data..read_only) \
54175 *(__vermagic) /* Kernel version magic */ \
54176 . = ALIGN(8); \
54177 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
54178@@ -723,17 +724,18 @@
54179 * section in the linker script will go there too. @phdr should have
54180 * a leading colon.
54181 *
54182- * Note that this macros defines __per_cpu_load as an absolute symbol.
54183+ * Note that this macros defines per_cpu_load as an absolute symbol.
54184 * If there is no need to put the percpu section at a predetermined
54185 * address, use PERCPU_SECTION.
54186 */
54187 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
54188- VMLINUX_SYMBOL(__per_cpu_load) = .; \
54189- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
54190+ per_cpu_load = .; \
54191+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
54192 - LOAD_OFFSET) { \
54193+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
54194 PERCPU_INPUT(cacheline) \
54195 } phdr \
54196- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
54197+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
54198
54199 /**
54200 * PERCPU_SECTION - define output section for percpu area, simple version
54201diff -urNp linux-3.0.4/include/drm/drm_crtc_helper.h linux-3.0.4/include/drm/drm_crtc_helper.h
54202--- linux-3.0.4/include/drm/drm_crtc_helper.h 2011-07-21 22:17:23.000000000 -0400
54203+++ linux-3.0.4/include/drm/drm_crtc_helper.h 2011-08-23 21:47:56.000000000 -0400
54204@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
54205
54206 /* disable crtc when not in use - more explicit than dpms off */
54207 void (*disable)(struct drm_crtc *crtc);
54208-};
54209+} __no_const;
54210
54211 struct drm_encoder_helper_funcs {
54212 void (*dpms)(struct drm_encoder *encoder, int mode);
54213@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
54214 struct drm_connector *connector);
54215 /* disable encoder when not in use - more explicit than dpms off */
54216 void (*disable)(struct drm_encoder *encoder);
54217-};
54218+} __no_const;
54219
54220 struct drm_connector_helper_funcs {
54221 int (*get_modes)(struct drm_connector *connector);
54222diff -urNp linux-3.0.4/include/drm/drmP.h linux-3.0.4/include/drm/drmP.h
54223--- linux-3.0.4/include/drm/drmP.h 2011-07-21 22:17:23.000000000 -0400
54224+++ linux-3.0.4/include/drm/drmP.h 2011-08-23 21:47:56.000000000 -0400
54225@@ -73,6 +73,7 @@
54226 #include <linux/workqueue.h>
54227 #include <linux/poll.h>
54228 #include <asm/pgalloc.h>
54229+#include <asm/local.h>
54230 #include "drm.h"
54231
54232 #include <linux/idr.h>
54233@@ -1033,7 +1034,7 @@ struct drm_device {
54234
54235 /** \name Usage Counters */
54236 /*@{ */
54237- int open_count; /**< Outstanding files open */
54238+ local_t open_count; /**< Outstanding files open */
54239 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
54240 atomic_t vma_count; /**< Outstanding vma areas open */
54241 int buf_use; /**< Buffers in use -- cannot alloc */
54242@@ -1044,7 +1045,7 @@ struct drm_device {
54243 /*@{ */
54244 unsigned long counters;
54245 enum drm_stat_type types[15];
54246- atomic_t counts[15];
54247+ atomic_unchecked_t counts[15];
54248 /*@} */
54249
54250 struct list_head filelist;
54251diff -urNp linux-3.0.4/include/drm/ttm/ttm_memory.h linux-3.0.4/include/drm/ttm/ttm_memory.h
54252--- linux-3.0.4/include/drm/ttm/ttm_memory.h 2011-07-21 22:17:23.000000000 -0400
54253+++ linux-3.0.4/include/drm/ttm/ttm_memory.h 2011-08-23 21:47:56.000000000 -0400
54254@@ -47,7 +47,7 @@
54255
54256 struct ttm_mem_shrink {
54257 int (*do_shrink) (struct ttm_mem_shrink *);
54258-};
54259+} __no_const;
54260
54261 /**
54262 * struct ttm_mem_global - Global memory accounting structure.
54263diff -urNp linux-3.0.4/include/linux/a.out.h linux-3.0.4/include/linux/a.out.h
54264--- linux-3.0.4/include/linux/a.out.h 2011-07-21 22:17:23.000000000 -0400
54265+++ linux-3.0.4/include/linux/a.out.h 2011-08-23 21:47:56.000000000 -0400
54266@@ -39,6 +39,14 @@ enum machine_type {
54267 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
54268 };
54269
54270+/* Constants for the N_FLAGS field */
54271+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
54272+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
54273+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
54274+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
54275+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
54276+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
54277+
54278 #if !defined (N_MAGIC)
54279 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
54280 #endif
54281diff -urNp linux-3.0.4/include/linux/atmdev.h linux-3.0.4/include/linux/atmdev.h
54282--- linux-3.0.4/include/linux/atmdev.h 2011-07-21 22:17:23.000000000 -0400
54283+++ linux-3.0.4/include/linux/atmdev.h 2011-08-23 21:47:56.000000000 -0400
54284@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
54285 #endif
54286
54287 struct k_atm_aal_stats {
54288-#define __HANDLE_ITEM(i) atomic_t i
54289+#define __HANDLE_ITEM(i) atomic_unchecked_t i
54290 __AAL_STAT_ITEMS
54291 #undef __HANDLE_ITEM
54292 };
54293diff -urNp linux-3.0.4/include/linux/binfmts.h linux-3.0.4/include/linux/binfmts.h
54294--- linux-3.0.4/include/linux/binfmts.h 2011-07-21 22:17:23.000000000 -0400
54295+++ linux-3.0.4/include/linux/binfmts.h 2011-08-23 21:47:56.000000000 -0400
54296@@ -88,6 +88,7 @@ struct linux_binfmt {
54297 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
54298 int (*load_shlib)(struct file *);
54299 int (*core_dump)(struct coredump_params *cprm);
54300+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
54301 unsigned long min_coredump; /* minimal dump size */
54302 };
54303
54304diff -urNp linux-3.0.4/include/linux/blkdev.h linux-3.0.4/include/linux/blkdev.h
54305--- linux-3.0.4/include/linux/blkdev.h 2011-07-21 22:17:23.000000000 -0400
54306+++ linux-3.0.4/include/linux/blkdev.h 2011-08-26 19:49:56.000000000 -0400
54307@@ -1308,7 +1308,7 @@ struct block_device_operations {
54308 /* this callback is with swap_lock and sometimes page table lock held */
54309 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
54310 struct module *owner;
54311-};
54312+} __do_const;
54313
54314 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
54315 unsigned long);
54316diff -urNp linux-3.0.4/include/linux/blktrace_api.h linux-3.0.4/include/linux/blktrace_api.h
54317--- linux-3.0.4/include/linux/blktrace_api.h 2011-07-21 22:17:23.000000000 -0400
54318+++ linux-3.0.4/include/linux/blktrace_api.h 2011-08-23 21:47:56.000000000 -0400
54319@@ -161,7 +161,7 @@ struct blk_trace {
54320 struct dentry *dir;
54321 struct dentry *dropped_file;
54322 struct dentry *msg_file;
54323- atomic_t dropped;
54324+ atomic_unchecked_t dropped;
54325 };
54326
54327 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
54328diff -urNp linux-3.0.4/include/linux/byteorder/little_endian.h linux-3.0.4/include/linux/byteorder/little_endian.h
54329--- linux-3.0.4/include/linux/byteorder/little_endian.h 2011-07-21 22:17:23.000000000 -0400
54330+++ linux-3.0.4/include/linux/byteorder/little_endian.h 2011-08-23 21:47:56.000000000 -0400
54331@@ -42,51 +42,51 @@
54332
54333 static inline __le64 __cpu_to_le64p(const __u64 *p)
54334 {
54335- return (__force __le64)*p;
54336+ return (__force const __le64)*p;
54337 }
54338 static inline __u64 __le64_to_cpup(const __le64 *p)
54339 {
54340- return (__force __u64)*p;
54341+ return (__force const __u64)*p;
54342 }
54343 static inline __le32 __cpu_to_le32p(const __u32 *p)
54344 {
54345- return (__force __le32)*p;
54346+ return (__force const __le32)*p;
54347 }
54348 static inline __u32 __le32_to_cpup(const __le32 *p)
54349 {
54350- return (__force __u32)*p;
54351+ return (__force const __u32)*p;
54352 }
54353 static inline __le16 __cpu_to_le16p(const __u16 *p)
54354 {
54355- return (__force __le16)*p;
54356+ return (__force const __le16)*p;
54357 }
54358 static inline __u16 __le16_to_cpup(const __le16 *p)
54359 {
54360- return (__force __u16)*p;
54361+ return (__force const __u16)*p;
54362 }
54363 static inline __be64 __cpu_to_be64p(const __u64 *p)
54364 {
54365- return (__force __be64)__swab64p(p);
54366+ return (__force const __be64)__swab64p(p);
54367 }
54368 static inline __u64 __be64_to_cpup(const __be64 *p)
54369 {
54370- return __swab64p((__u64 *)p);
54371+ return __swab64p((const __u64 *)p);
54372 }
54373 static inline __be32 __cpu_to_be32p(const __u32 *p)
54374 {
54375- return (__force __be32)__swab32p(p);
54376+ return (__force const __be32)__swab32p(p);
54377 }
54378 static inline __u32 __be32_to_cpup(const __be32 *p)
54379 {
54380- return __swab32p((__u32 *)p);
54381+ return __swab32p((const __u32 *)p);
54382 }
54383 static inline __be16 __cpu_to_be16p(const __u16 *p)
54384 {
54385- return (__force __be16)__swab16p(p);
54386+ return (__force const __be16)__swab16p(p);
54387 }
54388 static inline __u16 __be16_to_cpup(const __be16 *p)
54389 {
54390- return __swab16p((__u16 *)p);
54391+ return __swab16p((const __u16 *)p);
54392 }
54393 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
54394 #define __le64_to_cpus(x) do { (void)(x); } while (0)
54395diff -urNp linux-3.0.4/include/linux/cache.h linux-3.0.4/include/linux/cache.h
54396--- linux-3.0.4/include/linux/cache.h 2011-07-21 22:17:23.000000000 -0400
54397+++ linux-3.0.4/include/linux/cache.h 2011-08-23 21:47:56.000000000 -0400
54398@@ -16,6 +16,10 @@
54399 #define __read_mostly
54400 #endif
54401
54402+#ifndef __read_only
54403+#define __read_only __read_mostly
54404+#endif
54405+
54406 #ifndef ____cacheline_aligned
54407 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
54408 #endif
54409diff -urNp linux-3.0.4/include/linux/capability.h linux-3.0.4/include/linux/capability.h
54410--- linux-3.0.4/include/linux/capability.h 2011-07-21 22:17:23.000000000 -0400
54411+++ linux-3.0.4/include/linux/capability.h 2011-08-23 21:48:14.000000000 -0400
54412@@ -547,6 +547,9 @@ extern bool capable(int cap);
54413 extern bool ns_capable(struct user_namespace *ns, int cap);
54414 extern bool task_ns_capable(struct task_struct *t, int cap);
54415 extern bool nsown_capable(int cap);
54416+extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
54417+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
54418+extern bool capable_nolog(int cap);
54419
54420 /* audit system wants to get cap info from files as well */
54421 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
54422diff -urNp linux-3.0.4/include/linux/cleancache.h linux-3.0.4/include/linux/cleancache.h
54423--- linux-3.0.4/include/linux/cleancache.h 2011-07-21 22:17:23.000000000 -0400
54424+++ linux-3.0.4/include/linux/cleancache.h 2011-08-23 21:47:56.000000000 -0400
54425@@ -31,7 +31,7 @@ struct cleancache_ops {
54426 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
54427 void (*flush_inode)(int, struct cleancache_filekey);
54428 void (*flush_fs)(int);
54429-};
54430+} __no_const;
54431
54432 extern struct cleancache_ops
54433 cleancache_register_ops(struct cleancache_ops *ops);
54434diff -urNp linux-3.0.4/include/linux/compiler-gcc4.h linux-3.0.4/include/linux/compiler-gcc4.h
54435--- linux-3.0.4/include/linux/compiler-gcc4.h 2011-07-21 22:17:23.000000000 -0400
54436+++ linux-3.0.4/include/linux/compiler-gcc4.h 2011-08-26 19:49:56.000000000 -0400
54437@@ -31,6 +31,12 @@
54438
54439
54440 #if __GNUC_MINOR__ >= 5
54441+
54442+#ifdef CONSTIFY_PLUGIN
54443+#define __no_const __attribute__((no_const))
54444+#define __do_const __attribute__((do_const))
54445+#endif
54446+
54447 /*
54448 * Mark a position in code as unreachable. This can be used to
54449 * suppress control flow warnings after asm blocks that transfer
54450@@ -46,6 +52,11 @@
54451 #define __noclone __attribute__((__noclone__))
54452
54453 #endif
54454+
54455+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
54456+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
54457+#define __bos0(ptr) __bos((ptr), 0)
54458+#define __bos1(ptr) __bos((ptr), 1)
54459 #endif
54460
54461 #if __GNUC_MINOR__ > 0
54462diff -urNp linux-3.0.4/include/linux/compiler.h linux-3.0.4/include/linux/compiler.h
54463--- linux-3.0.4/include/linux/compiler.h 2011-07-21 22:17:23.000000000 -0400
54464+++ linux-3.0.4/include/linux/compiler.h 2011-08-26 19:49:56.000000000 -0400
54465@@ -264,6 +264,14 @@ void ftrace_likely_update(struct ftrace_
54466 # define __attribute_const__ /* unimplemented */
54467 #endif
54468
54469+#ifndef __no_const
54470+# define __no_const
54471+#endif
54472+
54473+#ifndef __do_const
54474+# define __do_const
54475+#endif
54476+
54477 /*
54478 * Tell gcc if a function is cold. The compiler will assume any path
54479 * directly leading to the call is unlikely.
54480@@ -273,6 +281,22 @@ void ftrace_likely_update(struct ftrace_
54481 #define __cold
54482 #endif
54483
54484+#ifndef __alloc_size
54485+#define __alloc_size(...)
54486+#endif
54487+
54488+#ifndef __bos
54489+#define __bos(ptr, arg)
54490+#endif
54491+
54492+#ifndef __bos0
54493+#define __bos0(ptr)
54494+#endif
54495+
54496+#ifndef __bos1
54497+#define __bos1(ptr)
54498+#endif
54499+
54500 /* Simple shorthand for a section definition */
54501 #ifndef __section
54502 # define __section(S) __attribute__ ((__section__(#S)))
54503@@ -306,6 +330,7 @@ void ftrace_likely_update(struct ftrace_
54504 * use is to mediate communication between process-level code and irq/NMI
54505 * handlers, all running on the same CPU.
54506 */
54507-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
54508+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
54509+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
54510
54511 #endif /* __LINUX_COMPILER_H */
54512diff -urNp linux-3.0.4/include/linux/cpuset.h linux-3.0.4/include/linux/cpuset.h
54513--- linux-3.0.4/include/linux/cpuset.h 2011-07-21 22:17:23.000000000 -0400
54514+++ linux-3.0.4/include/linux/cpuset.h 2011-08-23 21:47:56.000000000 -0400
54515@@ -118,7 +118,7 @@ static inline void put_mems_allowed(void
54516 * nodemask.
54517 */
54518 smp_mb();
54519- --ACCESS_ONCE(current->mems_allowed_change_disable);
54520+ --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
54521 }
54522
54523 static inline void set_mems_allowed(nodemask_t nodemask)
54524diff -urNp linux-3.0.4/include/linux/crypto.h linux-3.0.4/include/linux/crypto.h
54525--- linux-3.0.4/include/linux/crypto.h 2011-07-21 22:17:23.000000000 -0400
54526+++ linux-3.0.4/include/linux/crypto.h 2011-08-23 21:47:56.000000000 -0400
54527@@ -361,7 +361,7 @@ struct cipher_tfm {
54528 const u8 *key, unsigned int keylen);
54529 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
54530 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
54531-};
54532+} __no_const;
54533
54534 struct hash_tfm {
54535 int (*init)(struct hash_desc *desc);
54536@@ -382,13 +382,13 @@ struct compress_tfm {
54537 int (*cot_decompress)(struct crypto_tfm *tfm,
54538 const u8 *src, unsigned int slen,
54539 u8 *dst, unsigned int *dlen);
54540-};
54541+} __no_const;
54542
54543 struct rng_tfm {
54544 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
54545 unsigned int dlen);
54546 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
54547-};
54548+} __no_const;
54549
54550 #define crt_ablkcipher crt_u.ablkcipher
54551 #define crt_aead crt_u.aead
54552diff -urNp linux-3.0.4/include/linux/decompress/mm.h linux-3.0.4/include/linux/decompress/mm.h
54553--- linux-3.0.4/include/linux/decompress/mm.h 2011-07-21 22:17:23.000000000 -0400
54554+++ linux-3.0.4/include/linux/decompress/mm.h 2011-08-23 21:47:56.000000000 -0400
54555@@ -77,7 +77,7 @@ static void free(void *where)
54556 * warnings when not needed (indeed large_malloc / large_free are not
54557 * needed by inflate */
54558
54559-#define malloc(a) kmalloc(a, GFP_KERNEL)
54560+#define malloc(a) kmalloc((a), GFP_KERNEL)
54561 #define free(a) kfree(a)
54562
54563 #define large_malloc(a) vmalloc(a)
54564diff -urNp linux-3.0.4/include/linux/dma-mapping.h linux-3.0.4/include/linux/dma-mapping.h
54565--- linux-3.0.4/include/linux/dma-mapping.h 2011-07-21 22:17:23.000000000 -0400
54566+++ linux-3.0.4/include/linux/dma-mapping.h 2011-08-26 19:49:56.000000000 -0400
54567@@ -50,7 +50,7 @@ struct dma_map_ops {
54568 int (*dma_supported)(struct device *dev, u64 mask);
54569 int (*set_dma_mask)(struct device *dev, u64 mask);
54570 int is_phys;
54571-};
54572+} __do_const;
54573
54574 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
54575
54576diff -urNp linux-3.0.4/include/linux/efi.h linux-3.0.4/include/linux/efi.h
54577--- linux-3.0.4/include/linux/efi.h 2011-07-21 22:17:23.000000000 -0400
54578+++ linux-3.0.4/include/linux/efi.h 2011-08-23 21:47:56.000000000 -0400
54579@@ -410,7 +410,7 @@ struct efivar_operations {
54580 efi_get_variable_t *get_variable;
54581 efi_get_next_variable_t *get_next_variable;
54582 efi_set_variable_t *set_variable;
54583-};
54584+} __no_const;
54585
54586 struct efivars {
54587 /*
54588diff -urNp linux-3.0.4/include/linux/elf.h linux-3.0.4/include/linux/elf.h
54589--- linux-3.0.4/include/linux/elf.h 2011-07-21 22:17:23.000000000 -0400
54590+++ linux-3.0.4/include/linux/elf.h 2011-08-23 21:47:56.000000000 -0400
54591@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
54592 #define PT_GNU_EH_FRAME 0x6474e550
54593
54594 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
54595+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
54596+
54597+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
54598+
54599+/* Constants for the e_flags field */
54600+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
54601+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
54602+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
54603+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
54604+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
54605+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
54606
54607 /*
54608 * Extended Numbering
54609@@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
54610 #define DT_DEBUG 21
54611 #define DT_TEXTREL 22
54612 #define DT_JMPREL 23
54613+#define DT_FLAGS 30
54614+ #define DF_TEXTREL 0x00000004
54615 #define DT_ENCODING 32
54616 #define OLD_DT_LOOS 0x60000000
54617 #define DT_LOOS 0x6000000d
54618@@ -252,6 +265,19 @@ typedef struct elf64_hdr {
54619 #define PF_W 0x2
54620 #define PF_X 0x1
54621
54622+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
54623+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
54624+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
54625+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
54626+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
54627+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
54628+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
54629+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
54630+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
54631+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
54632+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
54633+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
54634+
54635 typedef struct elf32_phdr{
54636 Elf32_Word p_type;
54637 Elf32_Off p_offset;
54638@@ -344,6 +370,8 @@ typedef struct elf64_shdr {
54639 #define EI_OSABI 7
54640 #define EI_PAD 8
54641
54642+#define EI_PAX 14
54643+
54644 #define ELFMAG0 0x7f /* EI_MAG */
54645 #define ELFMAG1 'E'
54646 #define ELFMAG2 'L'
54647@@ -422,6 +450,7 @@ extern Elf32_Dyn _DYNAMIC [];
54648 #define elf_note elf32_note
54649 #define elf_addr_t Elf32_Off
54650 #define Elf_Half Elf32_Half
54651+#define elf_dyn Elf32_Dyn
54652
54653 #else
54654
54655@@ -432,6 +461,7 @@ extern Elf64_Dyn _DYNAMIC [];
54656 #define elf_note elf64_note
54657 #define elf_addr_t Elf64_Off
54658 #define Elf_Half Elf64_Half
54659+#define elf_dyn Elf64_Dyn
54660
54661 #endif
54662
54663diff -urNp linux-3.0.4/include/linux/firewire.h linux-3.0.4/include/linux/firewire.h
54664--- linux-3.0.4/include/linux/firewire.h 2011-07-21 22:17:23.000000000 -0400
54665+++ linux-3.0.4/include/linux/firewire.h 2011-08-23 21:47:56.000000000 -0400
54666@@ -428,7 +428,7 @@ struct fw_iso_context {
54667 union {
54668 fw_iso_callback_t sc;
54669 fw_iso_mc_callback_t mc;
54670- } callback;
54671+ } __no_const callback;
54672 void *callback_data;
54673 };
54674
54675diff -urNp linux-3.0.4/include/linux/fscache-cache.h linux-3.0.4/include/linux/fscache-cache.h
54676--- linux-3.0.4/include/linux/fscache-cache.h 2011-07-21 22:17:23.000000000 -0400
54677+++ linux-3.0.4/include/linux/fscache-cache.h 2011-08-23 21:47:56.000000000 -0400
54678@@ -102,7 +102,7 @@ struct fscache_operation {
54679 fscache_operation_release_t release;
54680 };
54681
54682-extern atomic_t fscache_op_debug_id;
54683+extern atomic_unchecked_t fscache_op_debug_id;
54684 extern void fscache_op_work_func(struct work_struct *work);
54685
54686 extern void fscache_enqueue_operation(struct fscache_operation *);
54687@@ -122,7 +122,7 @@ static inline void fscache_operation_ini
54688 {
54689 INIT_WORK(&op->work, fscache_op_work_func);
54690 atomic_set(&op->usage, 1);
54691- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
54692+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
54693 op->processor = processor;
54694 op->release = release;
54695 INIT_LIST_HEAD(&op->pend_link);
54696diff -urNp linux-3.0.4/include/linux/fs.h linux-3.0.4/include/linux/fs.h
54697--- linux-3.0.4/include/linux/fs.h 2011-07-21 22:17:23.000000000 -0400
54698+++ linux-3.0.4/include/linux/fs.h 2011-08-26 19:49:56.000000000 -0400
54699@@ -109,6 +109,11 @@ struct inodes_stat_t {
54700 /* File was opened by fanotify and shouldn't generate fanotify events */
54701 #define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
54702
54703+/* Hack for grsec so as not to require read permission simply to execute
54704+ * a binary
54705+ */
54706+#define FMODE_GREXEC ((__force fmode_t)0x2000000)
54707+
54708 /*
54709 * The below are the various read and write types that we support. Some of
54710 * them include behavioral modifiers that send information down to the
54711@@ -1571,7 +1576,8 @@ struct file_operations {
54712 int (*setlease)(struct file *, long, struct file_lock **);
54713 long (*fallocate)(struct file *file, int mode, loff_t offset,
54714 loff_t len);
54715-};
54716+} __do_const;
54717+typedef struct file_operations __no_const file_operations_no_const;
54718
54719 #define IPERM_FLAG_RCU 0x0001
54720
54721diff -urNp linux-3.0.4/include/linux/fsnotify.h linux-3.0.4/include/linux/fsnotify.h
54722--- linux-3.0.4/include/linux/fsnotify.h 2011-07-21 22:17:23.000000000 -0400
54723+++ linux-3.0.4/include/linux/fsnotify.h 2011-08-24 18:10:29.000000000 -0400
54724@@ -314,7 +314,7 @@ static inline void fsnotify_change(struc
54725 */
54726 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
54727 {
54728- return kstrdup(name, GFP_KERNEL);
54729+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
54730 }
54731
54732 /*
54733diff -urNp linux-3.0.4/include/linux/fs_struct.h linux-3.0.4/include/linux/fs_struct.h
54734--- linux-3.0.4/include/linux/fs_struct.h 2011-07-21 22:17:23.000000000 -0400
54735+++ linux-3.0.4/include/linux/fs_struct.h 2011-08-23 21:47:56.000000000 -0400
54736@@ -6,7 +6,7 @@
54737 #include <linux/seqlock.h>
54738
54739 struct fs_struct {
54740- int users;
54741+ atomic_t users;
54742 spinlock_t lock;
54743 seqcount_t seq;
54744 int umask;
54745diff -urNp linux-3.0.4/include/linux/ftrace_event.h linux-3.0.4/include/linux/ftrace_event.h
54746--- linux-3.0.4/include/linux/ftrace_event.h 2011-07-21 22:17:23.000000000 -0400
54747+++ linux-3.0.4/include/linux/ftrace_event.h 2011-08-23 21:47:56.000000000 -0400
54748@@ -96,7 +96,7 @@ struct trace_event_functions {
54749 trace_print_func raw;
54750 trace_print_func hex;
54751 trace_print_func binary;
54752-};
54753+} __no_const;
54754
54755 struct trace_event {
54756 struct hlist_node node;
54757@@ -247,7 +247,7 @@ extern int trace_define_field(struct ftr
54758 extern int trace_add_event_call(struct ftrace_event_call *call);
54759 extern void trace_remove_event_call(struct ftrace_event_call *call);
54760
54761-#define is_signed_type(type) (((type)(-1)) < 0)
54762+#define is_signed_type(type) (((type)(-1)) < (type)1)
54763
54764 int trace_set_clr_event(const char *system, const char *event, int set);
54765
54766diff -urNp linux-3.0.4/include/linux/genhd.h linux-3.0.4/include/linux/genhd.h
54767--- linux-3.0.4/include/linux/genhd.h 2011-07-21 22:17:23.000000000 -0400
54768+++ linux-3.0.4/include/linux/genhd.h 2011-08-23 21:47:56.000000000 -0400
54769@@ -184,7 +184,7 @@ struct gendisk {
54770 struct kobject *slave_dir;
54771
54772 struct timer_rand_state *random;
54773- atomic_t sync_io; /* RAID */
54774+ atomic_unchecked_t sync_io; /* RAID */
54775 struct disk_events *ev;
54776 #ifdef CONFIG_BLK_DEV_INTEGRITY
54777 struct blk_integrity *integrity;
54778diff -urNp linux-3.0.4/include/linux/gracl.h linux-3.0.4/include/linux/gracl.h
54779--- linux-3.0.4/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
54780+++ linux-3.0.4/include/linux/gracl.h 2011-08-23 21:48:14.000000000 -0400
54781@@ -0,0 +1,317 @@
54782+#ifndef GR_ACL_H
54783+#define GR_ACL_H
54784+
54785+#include <linux/grdefs.h>
54786+#include <linux/resource.h>
54787+#include <linux/capability.h>
54788+#include <linux/dcache.h>
54789+#include <asm/resource.h>
54790+
54791+/* Major status information */
54792+
54793+#define GR_VERSION "grsecurity 2.2.2"
54794+#define GRSECURITY_VERSION 0x2202
54795+
54796+enum {
54797+ GR_SHUTDOWN = 0,
54798+ GR_ENABLE = 1,
54799+ GR_SPROLE = 2,
54800+ GR_RELOAD = 3,
54801+ GR_SEGVMOD = 4,
54802+ GR_STATUS = 5,
54803+ GR_UNSPROLE = 6,
54804+ GR_PASSSET = 7,
54805+ GR_SPROLEPAM = 8,
54806+};
54807+
54808+/* Password setup definitions
54809+ * kernel/grhash.c */
54810+enum {
54811+ GR_PW_LEN = 128,
54812+ GR_SALT_LEN = 16,
54813+ GR_SHA_LEN = 32,
54814+};
54815+
54816+enum {
54817+ GR_SPROLE_LEN = 64,
54818+};
54819+
54820+enum {
54821+ GR_NO_GLOB = 0,
54822+ GR_REG_GLOB,
54823+ GR_CREATE_GLOB
54824+};
54825+
54826+#define GR_NLIMITS 32
54827+
54828+/* Begin Data Structures */
54829+
54830+struct sprole_pw {
54831+ unsigned char *rolename;
54832+ unsigned char salt[GR_SALT_LEN];
54833+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
54834+};
54835+
54836+struct name_entry {
54837+ __u32 key;
54838+ ino_t inode;
54839+ dev_t device;
54840+ char *name;
54841+ __u16 len;
54842+ __u8 deleted;
54843+ struct name_entry *prev;
54844+ struct name_entry *next;
54845+};
54846+
54847+struct inodev_entry {
54848+ struct name_entry *nentry;
54849+ struct inodev_entry *prev;
54850+ struct inodev_entry *next;
54851+};
54852+
54853+struct acl_role_db {
54854+ struct acl_role_label **r_hash;
54855+ __u32 r_size;
54856+};
54857+
54858+struct inodev_db {
54859+ struct inodev_entry **i_hash;
54860+ __u32 i_size;
54861+};
54862+
54863+struct name_db {
54864+ struct name_entry **n_hash;
54865+ __u32 n_size;
54866+};
54867+
54868+struct crash_uid {
54869+ uid_t uid;
54870+ unsigned long expires;
54871+};
54872+
54873+struct gr_hash_struct {
54874+ void **table;
54875+ void **nametable;
54876+ void *first;
54877+ __u32 table_size;
54878+ __u32 used_size;
54879+ int type;
54880+};
54881+
54882+/* Userspace Grsecurity ACL data structures */
54883+
54884+struct acl_subject_label {
54885+ char *filename;
54886+ ino_t inode;
54887+ dev_t device;
54888+ __u32 mode;
54889+ kernel_cap_t cap_mask;
54890+ kernel_cap_t cap_lower;
54891+ kernel_cap_t cap_invert_audit;
54892+
54893+ struct rlimit res[GR_NLIMITS];
54894+ __u32 resmask;
54895+
54896+ __u8 user_trans_type;
54897+ __u8 group_trans_type;
54898+ uid_t *user_transitions;
54899+ gid_t *group_transitions;
54900+ __u16 user_trans_num;
54901+ __u16 group_trans_num;
54902+
54903+ __u32 sock_families[2];
54904+ __u32 ip_proto[8];
54905+ __u32 ip_type;
54906+ struct acl_ip_label **ips;
54907+ __u32 ip_num;
54908+ __u32 inaddr_any_override;
54909+
54910+ __u32 crashes;
54911+ unsigned long expires;
54912+
54913+ struct acl_subject_label *parent_subject;
54914+ struct gr_hash_struct *hash;
54915+ struct acl_subject_label *prev;
54916+ struct acl_subject_label *next;
54917+
54918+ struct acl_object_label **obj_hash;
54919+ __u32 obj_hash_size;
54920+ __u16 pax_flags;
54921+};
54922+
54923+struct role_allowed_ip {
54924+ __u32 addr;
54925+ __u32 netmask;
54926+
54927+ struct role_allowed_ip *prev;
54928+ struct role_allowed_ip *next;
54929+};
54930+
54931+struct role_transition {
54932+ char *rolename;
54933+
54934+ struct role_transition *prev;
54935+ struct role_transition *next;
54936+};
54937+
54938+struct acl_role_label {
54939+ char *rolename;
54940+ uid_t uidgid;
54941+ __u16 roletype;
54942+
54943+ __u16 auth_attempts;
54944+ unsigned long expires;
54945+
54946+ struct acl_subject_label *root_label;
54947+ struct gr_hash_struct *hash;
54948+
54949+ struct acl_role_label *prev;
54950+ struct acl_role_label *next;
54951+
54952+ struct role_transition *transitions;
54953+ struct role_allowed_ip *allowed_ips;
54954+ uid_t *domain_children;
54955+ __u16 domain_child_num;
54956+
54957+ struct acl_subject_label **subj_hash;
54958+ __u32 subj_hash_size;
54959+};
54960+
54961+struct user_acl_role_db {
54962+ struct acl_role_label **r_table;
54963+ __u32 num_pointers; /* Number of allocations to track */
54964+ __u32 num_roles; /* Number of roles */
54965+ __u32 num_domain_children; /* Number of domain children */
54966+ __u32 num_subjects; /* Number of subjects */
54967+ __u32 num_objects; /* Number of objects */
54968+};
54969+
54970+struct acl_object_label {
54971+ char *filename;
54972+ ino_t inode;
54973+ dev_t device;
54974+ __u32 mode;
54975+
54976+ struct acl_subject_label *nested;
54977+ struct acl_object_label *globbed;
54978+
54979+ /* next two structures not used */
54980+
54981+ struct acl_object_label *prev;
54982+ struct acl_object_label *next;
54983+};
54984+
54985+struct acl_ip_label {
54986+ char *iface;
54987+ __u32 addr;
54988+ __u32 netmask;
54989+ __u16 low, high;
54990+ __u8 mode;
54991+ __u32 type;
54992+ __u32 proto[8];
54993+
54994+ /* next two structures not used */
54995+
54996+ struct acl_ip_label *prev;
54997+ struct acl_ip_label *next;
54998+};
54999+
55000+struct gr_arg {
55001+ struct user_acl_role_db role_db;
55002+ unsigned char pw[GR_PW_LEN];
55003+ unsigned char salt[GR_SALT_LEN];
55004+ unsigned char sum[GR_SHA_LEN];
55005+ unsigned char sp_role[GR_SPROLE_LEN];
55006+ struct sprole_pw *sprole_pws;
55007+ dev_t segv_device;
55008+ ino_t segv_inode;
55009+ uid_t segv_uid;
55010+ __u16 num_sprole_pws;
55011+ __u16 mode;
55012+};
55013+
55014+struct gr_arg_wrapper {
55015+ struct gr_arg *arg;
55016+ __u32 version;
55017+ __u32 size;
55018+};
55019+
55020+struct subject_map {
55021+ struct acl_subject_label *user;
55022+ struct acl_subject_label *kernel;
55023+ struct subject_map *prev;
55024+ struct subject_map *next;
55025+};
55026+
55027+struct acl_subj_map_db {
55028+ struct subject_map **s_hash;
55029+ __u32 s_size;
55030+};
55031+
55032+/* End Data Structures Section */
55033+
55034+/* Hash functions generated by empirical testing by Brad Spengler
55035+ Makes good use of the low bits of the inode. Generally 0-1 times
55036+ in loop for successful match. 0-3 for unsuccessful match.
55037+ Shift/add algorithm with modulus of table size and an XOR*/
55038+
55039+static __inline__ unsigned int
55040+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
55041+{
55042+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
55043+}
55044+
55045+ static __inline__ unsigned int
55046+shash(const struct acl_subject_label *userp, const unsigned int sz)
55047+{
55048+ return ((const unsigned long)userp % sz);
55049+}
55050+
55051+static __inline__ unsigned int
55052+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
55053+{
55054+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
55055+}
55056+
55057+static __inline__ unsigned int
55058+nhash(const char *name, const __u16 len, const unsigned int sz)
55059+{
55060+ return full_name_hash((const unsigned char *)name, len) % sz;
55061+}
55062+
55063+#define FOR_EACH_ROLE_START(role) \
55064+ role = role_list; \
55065+ while (role) {
55066+
55067+#define FOR_EACH_ROLE_END(role) \
55068+ role = role->prev; \
55069+ }
55070+
55071+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
55072+ subj = NULL; \
55073+ iter = 0; \
55074+ while (iter < role->subj_hash_size) { \
55075+ if (subj == NULL) \
55076+ subj = role->subj_hash[iter]; \
55077+ if (subj == NULL) { \
55078+ iter++; \
55079+ continue; \
55080+ }
55081+
55082+#define FOR_EACH_SUBJECT_END(subj,iter) \
55083+ subj = subj->next; \
55084+ if (subj == NULL) \
55085+ iter++; \
55086+ }
55087+
55088+
55089+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
55090+ subj = role->hash->first; \
55091+ while (subj != NULL) {
55092+
55093+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
55094+ subj = subj->next; \
55095+ }
55096+
55097+#endif
55098+
55099diff -urNp linux-3.0.4/include/linux/gralloc.h linux-3.0.4/include/linux/gralloc.h
55100--- linux-3.0.4/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
55101+++ linux-3.0.4/include/linux/gralloc.h 2011-08-23 21:48:14.000000000 -0400
55102@@ -0,0 +1,9 @@
55103+#ifndef __GRALLOC_H
55104+#define __GRALLOC_H
55105+
55106+void acl_free_all(void);
55107+int acl_alloc_stack_init(unsigned long size);
55108+void *acl_alloc(unsigned long len);
55109+void *acl_alloc_num(unsigned long num, unsigned long len);
55110+
55111+#endif
55112diff -urNp linux-3.0.4/include/linux/grdefs.h linux-3.0.4/include/linux/grdefs.h
55113--- linux-3.0.4/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
55114+++ linux-3.0.4/include/linux/grdefs.h 2011-08-23 21:48:14.000000000 -0400
55115@@ -0,0 +1,140 @@
55116+#ifndef GRDEFS_H
55117+#define GRDEFS_H
55118+
55119+/* Begin grsecurity status declarations */
55120+
55121+enum {
55122+ GR_READY = 0x01,
55123+ GR_STATUS_INIT = 0x00 // disabled state
55124+};
55125+
55126+/* Begin ACL declarations */
55127+
55128+/* Role flags */
55129+
55130+enum {
55131+ GR_ROLE_USER = 0x0001,
55132+ GR_ROLE_GROUP = 0x0002,
55133+ GR_ROLE_DEFAULT = 0x0004,
55134+ GR_ROLE_SPECIAL = 0x0008,
55135+ GR_ROLE_AUTH = 0x0010,
55136+ GR_ROLE_NOPW = 0x0020,
55137+ GR_ROLE_GOD = 0x0040,
55138+ GR_ROLE_LEARN = 0x0080,
55139+ GR_ROLE_TPE = 0x0100,
55140+ GR_ROLE_DOMAIN = 0x0200,
55141+ GR_ROLE_PAM = 0x0400,
55142+ GR_ROLE_PERSIST = 0x0800
55143+};
55144+
55145+/* ACL Subject and Object mode flags */
55146+enum {
55147+ GR_DELETED = 0x80000000
55148+};
55149+
55150+/* ACL Object-only mode flags */
55151+enum {
55152+ GR_READ = 0x00000001,
55153+ GR_APPEND = 0x00000002,
55154+ GR_WRITE = 0x00000004,
55155+ GR_EXEC = 0x00000008,
55156+ GR_FIND = 0x00000010,
55157+ GR_INHERIT = 0x00000020,
55158+ GR_SETID = 0x00000040,
55159+ GR_CREATE = 0x00000080,
55160+ GR_DELETE = 0x00000100,
55161+ GR_LINK = 0x00000200,
55162+ GR_AUDIT_READ = 0x00000400,
55163+ GR_AUDIT_APPEND = 0x00000800,
55164+ GR_AUDIT_WRITE = 0x00001000,
55165+ GR_AUDIT_EXEC = 0x00002000,
55166+ GR_AUDIT_FIND = 0x00004000,
55167+ GR_AUDIT_INHERIT= 0x00008000,
55168+ GR_AUDIT_SETID = 0x00010000,
55169+ GR_AUDIT_CREATE = 0x00020000,
55170+ GR_AUDIT_DELETE = 0x00040000,
55171+ GR_AUDIT_LINK = 0x00080000,
55172+ GR_PTRACERD = 0x00100000,
55173+ GR_NOPTRACE = 0x00200000,
55174+ GR_SUPPRESS = 0x00400000,
55175+ GR_NOLEARN = 0x00800000,
55176+ GR_INIT_TRANSFER= 0x01000000
55177+};
55178+
55179+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
55180+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
55181+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
55182+
55183+/* ACL subject-only mode flags */
55184+enum {
55185+ GR_KILL = 0x00000001,
55186+ GR_VIEW = 0x00000002,
55187+ GR_PROTECTED = 0x00000004,
55188+ GR_LEARN = 0x00000008,
55189+ GR_OVERRIDE = 0x00000010,
55190+ /* just a placeholder, this mode is only used in userspace */
55191+ GR_DUMMY = 0x00000020,
55192+ GR_PROTSHM = 0x00000040,
55193+ GR_KILLPROC = 0x00000080,
55194+ GR_KILLIPPROC = 0x00000100,
55195+ /* just a placeholder, this mode is only used in userspace */
55196+ GR_NOTROJAN = 0x00000200,
55197+ GR_PROTPROCFD = 0x00000400,
55198+ GR_PROCACCT = 0x00000800,
55199+ GR_RELAXPTRACE = 0x00001000,
55200+ GR_NESTED = 0x00002000,
55201+ GR_INHERITLEARN = 0x00004000,
55202+ GR_PROCFIND = 0x00008000,
55203+ GR_POVERRIDE = 0x00010000,
55204+ GR_KERNELAUTH = 0x00020000,
55205+ GR_ATSECURE = 0x00040000,
55206+ GR_SHMEXEC = 0x00080000
55207+};
55208+
55209+enum {
55210+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
55211+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
55212+ GR_PAX_ENABLE_MPROTECT = 0x0004,
55213+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
55214+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
55215+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
55216+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
55217+ GR_PAX_DISABLE_MPROTECT = 0x0400,
55218+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
55219+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
55220+};
55221+
55222+enum {
55223+ GR_ID_USER = 0x01,
55224+ GR_ID_GROUP = 0x02,
55225+};
55226+
55227+enum {
55228+ GR_ID_ALLOW = 0x01,
55229+ GR_ID_DENY = 0x02,
55230+};
55231+
55232+#define GR_CRASH_RES 31
55233+#define GR_UIDTABLE_MAX 500
55234+
55235+/* begin resource learning section */
55236+enum {
55237+ GR_RLIM_CPU_BUMP = 60,
55238+ GR_RLIM_FSIZE_BUMP = 50000,
55239+ GR_RLIM_DATA_BUMP = 10000,
55240+ GR_RLIM_STACK_BUMP = 1000,
55241+ GR_RLIM_CORE_BUMP = 10000,
55242+ GR_RLIM_RSS_BUMP = 500000,
55243+ GR_RLIM_NPROC_BUMP = 1,
55244+ GR_RLIM_NOFILE_BUMP = 5,
55245+ GR_RLIM_MEMLOCK_BUMP = 50000,
55246+ GR_RLIM_AS_BUMP = 500000,
55247+ GR_RLIM_LOCKS_BUMP = 2,
55248+ GR_RLIM_SIGPENDING_BUMP = 5,
55249+ GR_RLIM_MSGQUEUE_BUMP = 10000,
55250+ GR_RLIM_NICE_BUMP = 1,
55251+ GR_RLIM_RTPRIO_BUMP = 1,
55252+ GR_RLIM_RTTIME_BUMP = 1000000
55253+};
55254+
55255+#endif
55256diff -urNp linux-3.0.4/include/linux/grinternal.h linux-3.0.4/include/linux/grinternal.h
55257--- linux-3.0.4/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
55258+++ linux-3.0.4/include/linux/grinternal.h 2011-08-23 21:48:14.000000000 -0400
55259@@ -0,0 +1,219 @@
55260+#ifndef __GRINTERNAL_H
55261+#define __GRINTERNAL_H
55262+
55263+#ifdef CONFIG_GRKERNSEC
55264+
55265+#include <linux/fs.h>
55266+#include <linux/mnt_namespace.h>
55267+#include <linux/nsproxy.h>
55268+#include <linux/gracl.h>
55269+#include <linux/grdefs.h>
55270+#include <linux/grmsg.h>
55271+
55272+void gr_add_learn_entry(const char *fmt, ...)
55273+ __attribute__ ((format (printf, 1, 2)));
55274+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
55275+ const struct vfsmount *mnt);
55276+__u32 gr_check_create(const struct dentry *new_dentry,
55277+ const struct dentry *parent,
55278+ const struct vfsmount *mnt, const __u32 mode);
55279+int gr_check_protected_task(const struct task_struct *task);
55280+__u32 to_gr_audit(const __u32 reqmode);
55281+int gr_set_acls(const int type);
55282+int gr_apply_subject_to_task(struct task_struct *task);
55283+int gr_acl_is_enabled(void);
55284+char gr_roletype_to_char(void);
55285+
55286+void gr_handle_alertkill(struct task_struct *task);
55287+char *gr_to_filename(const struct dentry *dentry,
55288+ const struct vfsmount *mnt);
55289+char *gr_to_filename1(const struct dentry *dentry,
55290+ const struct vfsmount *mnt);
55291+char *gr_to_filename2(const struct dentry *dentry,
55292+ const struct vfsmount *mnt);
55293+char *gr_to_filename3(const struct dentry *dentry,
55294+ const struct vfsmount *mnt);
55295+
55296+extern int grsec_enable_harden_ptrace;
55297+extern int grsec_enable_link;
55298+extern int grsec_enable_fifo;
55299+extern int grsec_enable_execve;
55300+extern int grsec_enable_shm;
55301+extern int grsec_enable_execlog;
55302+extern int grsec_enable_signal;
55303+extern int grsec_enable_audit_ptrace;
55304+extern int grsec_enable_forkfail;
55305+extern int grsec_enable_time;
55306+extern int grsec_enable_rofs;
55307+extern int grsec_enable_chroot_shmat;
55308+extern int grsec_enable_chroot_mount;
55309+extern int grsec_enable_chroot_double;
55310+extern int grsec_enable_chroot_pivot;
55311+extern int grsec_enable_chroot_chdir;
55312+extern int grsec_enable_chroot_chmod;
55313+extern int grsec_enable_chroot_mknod;
55314+extern int grsec_enable_chroot_fchdir;
55315+extern int grsec_enable_chroot_nice;
55316+extern int grsec_enable_chroot_execlog;
55317+extern int grsec_enable_chroot_caps;
55318+extern int grsec_enable_chroot_sysctl;
55319+extern int grsec_enable_chroot_unix;
55320+extern int grsec_enable_tpe;
55321+extern int grsec_tpe_gid;
55322+extern int grsec_enable_tpe_all;
55323+extern int grsec_enable_tpe_invert;
55324+extern int grsec_enable_socket_all;
55325+extern int grsec_socket_all_gid;
55326+extern int grsec_enable_socket_client;
55327+extern int grsec_socket_client_gid;
55328+extern int grsec_enable_socket_server;
55329+extern int grsec_socket_server_gid;
55330+extern int grsec_audit_gid;
55331+extern int grsec_enable_group;
55332+extern int grsec_enable_audit_textrel;
55333+extern int grsec_enable_log_rwxmaps;
55334+extern int grsec_enable_mount;
55335+extern int grsec_enable_chdir;
55336+extern int grsec_resource_logging;
55337+extern int grsec_enable_blackhole;
55338+extern int grsec_lastack_retries;
55339+extern int grsec_enable_brute;
55340+extern int grsec_lock;
55341+
55342+extern spinlock_t grsec_alert_lock;
55343+extern unsigned long grsec_alert_wtime;
55344+extern unsigned long grsec_alert_fyet;
55345+
55346+extern spinlock_t grsec_audit_lock;
55347+
55348+extern rwlock_t grsec_exec_file_lock;
55349+
55350+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
55351+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
55352+ (tsk)->exec_file->f_vfsmnt) : "/")
55353+
55354+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
55355+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
55356+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
55357+
55358+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
55359+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
55360+ (tsk)->exec_file->f_vfsmnt) : "/")
55361+
55362+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
55363+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
55364+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
55365+
55366+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
55367+
55368+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
55369+
55370+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
55371+ (task)->pid, (cred)->uid, \
55372+ (cred)->euid, (cred)->gid, (cred)->egid, \
55373+ gr_parent_task_fullpath(task), \
55374+ (task)->real_parent->comm, (task)->real_parent->pid, \
55375+ (pcred)->uid, (pcred)->euid, \
55376+ (pcred)->gid, (pcred)->egid
55377+
55378+#define GR_CHROOT_CAPS {{ \
55379+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
55380+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
55381+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
55382+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
55383+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
55384+ CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
55385+
55386+#define security_learn(normal_msg,args...) \
55387+({ \
55388+ read_lock(&grsec_exec_file_lock); \
55389+ gr_add_learn_entry(normal_msg "\n", ## args); \
55390+ read_unlock(&grsec_exec_file_lock); \
55391+})
55392+
55393+enum {
55394+ GR_DO_AUDIT,
55395+ GR_DONT_AUDIT,
55396+ /* used for non-audit messages that we shouldn't kill the task on */
55397+ GR_DONT_AUDIT_GOOD
55398+};
55399+
55400+enum {
55401+ GR_TTYSNIFF,
55402+ GR_RBAC,
55403+ GR_RBAC_STR,
55404+ GR_STR_RBAC,
55405+ GR_RBAC_MODE2,
55406+ GR_RBAC_MODE3,
55407+ GR_FILENAME,
55408+ GR_SYSCTL_HIDDEN,
55409+ GR_NOARGS,
55410+ GR_ONE_INT,
55411+ GR_ONE_INT_TWO_STR,
55412+ GR_ONE_STR,
55413+ GR_STR_INT,
55414+ GR_TWO_STR_INT,
55415+ GR_TWO_INT,
55416+ GR_TWO_U64,
55417+ GR_THREE_INT,
55418+ GR_FIVE_INT_TWO_STR,
55419+ GR_TWO_STR,
55420+ GR_THREE_STR,
55421+ GR_FOUR_STR,
55422+ GR_STR_FILENAME,
55423+ GR_FILENAME_STR,
55424+ GR_FILENAME_TWO_INT,
55425+ GR_FILENAME_TWO_INT_STR,
55426+ GR_TEXTREL,
55427+ GR_PTRACE,
55428+ GR_RESOURCE,
55429+ GR_CAP,
55430+ GR_SIG,
55431+ GR_SIG2,
55432+ GR_CRASH1,
55433+ GR_CRASH2,
55434+ GR_PSACCT,
55435+ GR_RWXMAP
55436+};
55437+
55438+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
55439+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
55440+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
55441+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
55442+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
55443+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
55444+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
55445+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
55446+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
55447+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
55448+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
55449+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
55450+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
55451+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
55452+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
55453+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
55454+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
55455+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
55456+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
55457+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
55458+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
55459+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
55460+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
55461+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
55462+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
55463+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
55464+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
55465+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
55466+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
55467+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
55468+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
55469+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
55470+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
55471+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
55472+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
55473+
55474+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
55475+
55476+#endif
55477+
55478+#endif
55479diff -urNp linux-3.0.4/include/linux/grmsg.h linux-3.0.4/include/linux/grmsg.h
55480--- linux-3.0.4/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
55481+++ linux-3.0.4/include/linux/grmsg.h 2011-09-14 09:16:54.000000000 -0400
55482@@ -0,0 +1,108 @@
55483+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
55484+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
55485+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
55486+#define GR_STOPMOD_MSG "denied modification of module state by "
55487+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
55488+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
55489+#define GR_IOPERM_MSG "denied use of ioperm() by "
55490+#define GR_IOPL_MSG "denied use of iopl() by "
55491+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
55492+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
55493+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
55494+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
55495+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
55496+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
55497+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
55498+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
55499+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
55500+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
55501+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
55502+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
55503+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
55504+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
55505+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
55506+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
55507+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
55508+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
55509+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
55510+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
55511+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
55512+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
55513+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
55514+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
55515+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
55516+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
55517+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
55518+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
55519+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
55520+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
55521+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
55522+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
55523+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
55524+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
55525+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
55526+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
55527+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
55528+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
55529+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
55530+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
55531+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
55532+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
55533+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
55534+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
55535+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
55536+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
55537+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
55538+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
55539+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
55540+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
55541+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
55542+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
55543+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
55544+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
55545+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
55546+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
55547+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
55548+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
55549+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
55550+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
55551+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
55552+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
55553+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
55554+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
55555+#define GR_FAILFORK_MSG "failed fork with errno %s by "
55556+#define GR_NICE_CHROOT_MSG "denied priority change by "
55557+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
55558+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
55559+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
55560+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
55561+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
55562+#define GR_TIME_MSG "time set by "
55563+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
55564+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
55565+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
55566+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
55567+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
55568+#define GR_BIND_MSG "denied bind() by "
55569+#define GR_CONNECT_MSG "denied connect() by "
55570+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
55571+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
55572+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
55573+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
55574+#define GR_CAP_ACL_MSG "use of %s denied for "
55575+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
55576+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
55577+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
55578+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
55579+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
55580+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
55581+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
55582+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
55583+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
55584+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
55585+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
55586+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
55587+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
55588+#define GR_VM86_MSG "denied use of vm86 by "
55589+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
55590+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
55591diff -urNp linux-3.0.4/include/linux/grsecurity.h linux-3.0.4/include/linux/grsecurity.h
55592--- linux-3.0.4/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
55593+++ linux-3.0.4/include/linux/grsecurity.h 2011-09-14 09:16:54.000000000 -0400
55594@@ -0,0 +1,226 @@
55595+#ifndef GR_SECURITY_H
55596+#define GR_SECURITY_H
55597+#include <linux/fs.h>
55598+#include <linux/fs_struct.h>
55599+#include <linux/binfmts.h>
55600+#include <linux/gracl.h>
55601+
55602+/* notify of brain-dead configs */
55603+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55604+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
55605+#endif
55606+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
55607+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
55608+#endif
55609+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
55610+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
55611+#endif
55612+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
55613+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
55614+#endif
55615+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
55616+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
55617+#endif
55618+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
55619+#error "CONFIG_PAX enabled, but no PaX options are enabled."
55620+#endif
55621+
55622+#include <linux/compat.h>
55623+
55624+struct user_arg_ptr {
55625+#ifdef CONFIG_COMPAT
55626+ bool is_compat;
55627+#endif
55628+ union {
55629+ const char __user *const __user *native;
55630+#ifdef CONFIG_COMPAT
55631+ compat_uptr_t __user *compat;
55632+#endif
55633+ } ptr;
55634+};
55635+
55636+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
55637+void gr_handle_brute_check(void);
55638+void gr_handle_kernel_exploit(void);
55639+int gr_process_user_ban(void);
55640+
55641+char gr_roletype_to_char(void);
55642+
55643+int gr_acl_enable_at_secure(void);
55644+
55645+int gr_check_user_change(int real, int effective, int fs);
55646+int gr_check_group_change(int real, int effective, int fs);
55647+
55648+void gr_del_task_from_ip_table(struct task_struct *p);
55649+
55650+int gr_pid_is_chrooted(struct task_struct *p);
55651+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
55652+int gr_handle_chroot_nice(void);
55653+int gr_handle_chroot_sysctl(const int op);
55654+int gr_handle_chroot_setpriority(struct task_struct *p,
55655+ const int niceval);
55656+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
55657+int gr_handle_chroot_chroot(const struct dentry *dentry,
55658+ const struct vfsmount *mnt);
55659+void gr_handle_chroot_chdir(struct path *path);
55660+int gr_handle_chroot_chmod(const struct dentry *dentry,
55661+ const struct vfsmount *mnt, const int mode);
55662+int gr_handle_chroot_mknod(const struct dentry *dentry,
55663+ const struct vfsmount *mnt, const int mode);
55664+int gr_handle_chroot_mount(const struct dentry *dentry,
55665+ const struct vfsmount *mnt,
55666+ const char *dev_name);
55667+int gr_handle_chroot_pivot(void);
55668+int gr_handle_chroot_unix(const pid_t pid);
55669+
55670+int gr_handle_rawio(const struct inode *inode);
55671+
55672+void gr_handle_ioperm(void);
55673+void gr_handle_iopl(void);
55674+
55675+int gr_tpe_allow(const struct file *file);
55676+
55677+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
55678+void gr_clear_chroot_entries(struct task_struct *task);
55679+
55680+void gr_log_forkfail(const int retval);
55681+void gr_log_timechange(void);
55682+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
55683+void gr_log_chdir(const struct dentry *dentry,
55684+ const struct vfsmount *mnt);
55685+void gr_log_chroot_exec(const struct dentry *dentry,
55686+ const struct vfsmount *mnt);
55687+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
55688+void gr_log_remount(const char *devname, const int retval);
55689+void gr_log_unmount(const char *devname, const int retval);
55690+void gr_log_mount(const char *from, const char *to, const int retval);
55691+void gr_log_textrel(struct vm_area_struct *vma);
55692+void gr_log_rwxmmap(struct file *file);
55693+void gr_log_rwxmprotect(struct file *file);
55694+
55695+int gr_handle_follow_link(const struct inode *parent,
55696+ const struct inode *inode,
55697+ const struct dentry *dentry,
55698+ const struct vfsmount *mnt);
55699+int gr_handle_fifo(const struct dentry *dentry,
55700+ const struct vfsmount *mnt,
55701+ const struct dentry *dir, const int flag,
55702+ const int acc_mode);
55703+int gr_handle_hardlink(const struct dentry *dentry,
55704+ const struct vfsmount *mnt,
55705+ struct inode *inode,
55706+ const int mode, const char *to);
55707+
55708+int gr_is_capable(const int cap);
55709+int gr_is_capable_nolog(const int cap);
55710+void gr_learn_resource(const struct task_struct *task, const int limit,
55711+ const unsigned long wanted, const int gt);
55712+void gr_copy_label(struct task_struct *tsk);
55713+void gr_handle_crash(struct task_struct *task, const int sig);
55714+int gr_handle_signal(const struct task_struct *p, const int sig);
55715+int gr_check_crash_uid(const uid_t uid);
55716+int gr_check_protected_task(const struct task_struct *task);
55717+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
55718+int gr_acl_handle_mmap(const struct file *file,
55719+ const unsigned long prot);
55720+int gr_acl_handle_mprotect(const struct file *file,
55721+ const unsigned long prot);
55722+int gr_check_hidden_task(const struct task_struct *tsk);
55723+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
55724+ const struct vfsmount *mnt);
55725+__u32 gr_acl_handle_utime(const struct dentry *dentry,
55726+ const struct vfsmount *mnt);
55727+__u32 gr_acl_handle_access(const struct dentry *dentry,
55728+ const struct vfsmount *mnt, const int fmode);
55729+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
55730+ const struct vfsmount *mnt, mode_t mode);
55731+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
55732+ const struct vfsmount *mnt, mode_t mode);
55733+__u32 gr_acl_handle_chown(const struct dentry *dentry,
55734+ const struct vfsmount *mnt);
55735+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
55736+ const struct vfsmount *mnt);
55737+int gr_handle_ptrace(struct task_struct *task, const long request);
55738+int gr_handle_proc_ptrace(struct task_struct *task);
55739+__u32 gr_acl_handle_execve(const struct dentry *dentry,
55740+ const struct vfsmount *mnt);
55741+int gr_check_crash_exec(const struct file *filp);
55742+int gr_acl_is_enabled(void);
55743+void gr_set_kernel_label(struct task_struct *task);
55744+void gr_set_role_label(struct task_struct *task, const uid_t uid,
55745+ const gid_t gid);
55746+int gr_set_proc_label(const struct dentry *dentry,
55747+ const struct vfsmount *mnt,
55748+ const int unsafe_share);
55749+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
55750+ const struct vfsmount *mnt);
55751+__u32 gr_acl_handle_open(const struct dentry *dentry,
55752+ const struct vfsmount *mnt, const int fmode);
55753+__u32 gr_acl_handle_creat(const struct dentry *dentry,
55754+ const struct dentry *p_dentry,
55755+ const struct vfsmount *p_mnt, const int fmode,
55756+ const int imode);
55757+void gr_handle_create(const struct dentry *dentry,
55758+ const struct vfsmount *mnt);
55759+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
55760+ const struct dentry *parent_dentry,
55761+ const struct vfsmount *parent_mnt,
55762+ const int mode);
55763+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
55764+ const struct dentry *parent_dentry,
55765+ const struct vfsmount *parent_mnt);
55766+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
55767+ const struct vfsmount *mnt);
55768+void gr_handle_delete(const ino_t ino, const dev_t dev);
55769+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
55770+ const struct vfsmount *mnt);
55771+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
55772+ const struct dentry *parent_dentry,
55773+ const struct vfsmount *parent_mnt,
55774+ const char *from);
55775+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
55776+ const struct dentry *parent_dentry,
55777+ const struct vfsmount *parent_mnt,
55778+ const struct dentry *old_dentry,
55779+ const struct vfsmount *old_mnt, const char *to);
55780+int gr_acl_handle_rename(struct dentry *new_dentry,
55781+ struct dentry *parent_dentry,
55782+ const struct vfsmount *parent_mnt,
55783+ struct dentry *old_dentry,
55784+ struct inode *old_parent_inode,
55785+ struct vfsmount *old_mnt, const char *newname);
55786+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
55787+ struct dentry *old_dentry,
55788+ struct dentry *new_dentry,
55789+ struct vfsmount *mnt, const __u8 replace);
55790+__u32 gr_check_link(const struct dentry *new_dentry,
55791+ const struct dentry *parent_dentry,
55792+ const struct vfsmount *parent_mnt,
55793+ const struct dentry *old_dentry,
55794+ const struct vfsmount *old_mnt);
55795+int gr_acl_handle_filldir(const struct file *file, const char *name,
55796+ const unsigned int namelen, const ino_t ino);
55797+
55798+__u32 gr_acl_handle_unix(const struct dentry *dentry,
55799+ const struct vfsmount *mnt);
55800+void gr_acl_handle_exit(void);
55801+void gr_acl_handle_psacct(struct task_struct *task, const long code);
55802+int gr_acl_handle_procpidmem(const struct task_struct *task);
55803+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
55804+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
55805+void gr_audit_ptrace(struct task_struct *task);
55806+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
55807+
55808+#ifdef CONFIG_GRKERNSEC
55809+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
55810+void gr_handle_vm86(void);
55811+void gr_handle_mem_readwrite(u64 from, u64 to);
55812+
55813+extern int grsec_enable_dmesg;
55814+extern int grsec_disable_privio;
55815+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55816+extern int grsec_enable_chroot_findtask;
55817+#endif
55818+#endif
55819+
55820+#endif
55821diff -urNp linux-3.0.4/include/linux/grsock.h linux-3.0.4/include/linux/grsock.h
55822--- linux-3.0.4/include/linux/grsock.h 1969-12-31 19:00:00.000000000 -0500
55823+++ linux-3.0.4/include/linux/grsock.h 2011-08-23 21:48:14.000000000 -0400
55824@@ -0,0 +1,19 @@
55825+#ifndef __GRSOCK_H
55826+#define __GRSOCK_H
55827+
55828+extern void gr_attach_curr_ip(const struct sock *sk);
55829+extern int gr_handle_sock_all(const int family, const int type,
55830+ const int protocol);
55831+extern int gr_handle_sock_server(const struct sockaddr *sck);
55832+extern int gr_handle_sock_server_other(const struct sock *sck);
55833+extern int gr_handle_sock_client(const struct sockaddr *sck);
55834+extern int gr_search_connect(struct socket * sock,
55835+ struct sockaddr_in * addr);
55836+extern int gr_search_bind(struct socket * sock,
55837+ struct sockaddr_in * addr);
55838+extern int gr_search_listen(struct socket * sock);
55839+extern int gr_search_accept(struct socket * sock);
55840+extern int gr_search_socket(const int domain, const int type,
55841+ const int protocol);
55842+
55843+#endif
55844diff -urNp linux-3.0.4/include/linux/hid.h linux-3.0.4/include/linux/hid.h
55845--- linux-3.0.4/include/linux/hid.h 2011-07-21 22:17:23.000000000 -0400
55846+++ linux-3.0.4/include/linux/hid.h 2011-08-23 21:47:56.000000000 -0400
55847@@ -675,7 +675,7 @@ struct hid_ll_driver {
55848 unsigned int code, int value);
55849
55850 int (*parse)(struct hid_device *hdev);
55851-};
55852+} __no_const;
55853
55854 #define PM_HINT_FULLON 1<<5
55855 #define PM_HINT_NORMAL 1<<1
55856diff -urNp linux-3.0.4/include/linux/highmem.h linux-3.0.4/include/linux/highmem.h
55857--- linux-3.0.4/include/linux/highmem.h 2011-07-21 22:17:23.000000000 -0400
55858+++ linux-3.0.4/include/linux/highmem.h 2011-08-23 21:47:56.000000000 -0400
55859@@ -185,6 +185,18 @@ static inline void clear_highpage(struct
55860 kunmap_atomic(kaddr, KM_USER0);
55861 }
55862
55863+static inline void sanitize_highpage(struct page *page)
55864+{
55865+ void *kaddr;
55866+ unsigned long flags;
55867+
55868+ local_irq_save(flags);
55869+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
55870+ clear_page(kaddr);
55871+ kunmap_atomic(kaddr, KM_CLEARPAGE);
55872+ local_irq_restore(flags);
55873+}
55874+
55875 static inline void zero_user_segments(struct page *page,
55876 unsigned start1, unsigned end1,
55877 unsigned start2, unsigned end2)
55878diff -urNp linux-3.0.4/include/linux/i2c.h linux-3.0.4/include/linux/i2c.h
55879--- linux-3.0.4/include/linux/i2c.h 2011-07-21 22:17:23.000000000 -0400
55880+++ linux-3.0.4/include/linux/i2c.h 2011-08-23 21:47:56.000000000 -0400
55881@@ -346,6 +346,7 @@ struct i2c_algorithm {
55882 /* To determine what the adapter supports */
55883 u32 (*functionality) (struct i2c_adapter *);
55884 };
55885+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
55886
55887 /*
55888 * i2c_adapter is the structure used to identify a physical i2c bus along
55889diff -urNp linux-3.0.4/include/linux/i2o.h linux-3.0.4/include/linux/i2o.h
55890--- linux-3.0.4/include/linux/i2o.h 2011-07-21 22:17:23.000000000 -0400
55891+++ linux-3.0.4/include/linux/i2o.h 2011-08-23 21:47:56.000000000 -0400
55892@@ -564,7 +564,7 @@ struct i2o_controller {
55893 struct i2o_device *exec; /* Executive */
55894 #if BITS_PER_LONG == 64
55895 spinlock_t context_list_lock; /* lock for context_list */
55896- atomic_t context_list_counter; /* needed for unique contexts */
55897+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
55898 struct list_head context_list; /* list of context id's
55899 and pointers */
55900 #endif
55901diff -urNp linux-3.0.4/include/linux/init.h linux-3.0.4/include/linux/init.h
55902--- linux-3.0.4/include/linux/init.h 2011-07-21 22:17:23.000000000 -0400
55903+++ linux-3.0.4/include/linux/init.h 2011-08-23 21:47:56.000000000 -0400
55904@@ -293,13 +293,13 @@ void __init parse_early_options(char *cm
55905
55906 /* Each module must use one module_init(). */
55907 #define module_init(initfn) \
55908- static inline initcall_t __inittest(void) \
55909+ static inline __used initcall_t __inittest(void) \
55910 { return initfn; } \
55911 int init_module(void) __attribute__((alias(#initfn)));
55912
55913 /* This is only required if you want to be unloadable. */
55914 #define module_exit(exitfn) \
55915- static inline exitcall_t __exittest(void) \
55916+ static inline __used exitcall_t __exittest(void) \
55917 { return exitfn; } \
55918 void cleanup_module(void) __attribute__((alias(#exitfn)));
55919
55920diff -urNp linux-3.0.4/include/linux/init_task.h linux-3.0.4/include/linux/init_task.h
55921--- linux-3.0.4/include/linux/init_task.h 2011-07-21 22:17:23.000000000 -0400
55922+++ linux-3.0.4/include/linux/init_task.h 2011-08-23 21:47:56.000000000 -0400
55923@@ -126,6 +126,12 @@ extern struct cred init_cred;
55924 # define INIT_PERF_EVENTS(tsk)
55925 #endif
55926
55927+#ifdef CONFIG_X86
55928+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
55929+#else
55930+#define INIT_TASK_THREAD_INFO
55931+#endif
55932+
55933 /*
55934 * INIT_TASK is used to set up the first task table, touch at
55935 * your own risk!. Base=0, limit=0x1fffff (=2MB)
55936@@ -164,6 +170,7 @@ extern struct cred init_cred;
55937 RCU_INIT_POINTER(.cred, &init_cred), \
55938 .comm = "swapper", \
55939 .thread = INIT_THREAD, \
55940+ INIT_TASK_THREAD_INFO \
55941 .fs = &init_fs, \
55942 .files = &init_files, \
55943 .signal = &init_signals, \
55944diff -urNp linux-3.0.4/include/linux/intel-iommu.h linux-3.0.4/include/linux/intel-iommu.h
55945--- linux-3.0.4/include/linux/intel-iommu.h 2011-07-21 22:17:23.000000000 -0400
55946+++ linux-3.0.4/include/linux/intel-iommu.h 2011-08-23 21:47:56.000000000 -0400
55947@@ -296,7 +296,7 @@ struct iommu_flush {
55948 u8 fm, u64 type);
55949 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
55950 unsigned int size_order, u64 type);
55951-};
55952+} __no_const;
55953
55954 enum {
55955 SR_DMAR_FECTL_REG,
55956diff -urNp linux-3.0.4/include/linux/interrupt.h linux-3.0.4/include/linux/interrupt.h
55957--- linux-3.0.4/include/linux/interrupt.h 2011-07-21 22:17:23.000000000 -0400
55958+++ linux-3.0.4/include/linux/interrupt.h 2011-08-23 21:47:56.000000000 -0400
55959@@ -422,7 +422,7 @@ enum
55960 /* map softirq index to softirq name. update 'softirq_to_name' in
55961 * kernel/softirq.c when adding a new softirq.
55962 */
55963-extern char *softirq_to_name[NR_SOFTIRQS];
55964+extern const char * const softirq_to_name[NR_SOFTIRQS];
55965
55966 /* softirq mask and active fields moved to irq_cpustat_t in
55967 * asm/hardirq.h to get better cache usage. KAO
55968@@ -430,12 +430,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
55969
55970 struct softirq_action
55971 {
55972- void (*action)(struct softirq_action *);
55973+ void (*action)(void);
55974 };
55975
55976 asmlinkage void do_softirq(void);
55977 asmlinkage void __do_softirq(void);
55978-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
55979+extern void open_softirq(int nr, void (*action)(void));
55980 extern void softirq_init(void);
55981 static inline void __raise_softirq_irqoff(unsigned int nr)
55982 {
55983diff -urNp linux-3.0.4/include/linux/kallsyms.h linux-3.0.4/include/linux/kallsyms.h
55984--- linux-3.0.4/include/linux/kallsyms.h 2011-07-21 22:17:23.000000000 -0400
55985+++ linux-3.0.4/include/linux/kallsyms.h 2011-08-23 21:48:14.000000000 -0400
55986@@ -15,7 +15,8 @@
55987
55988 struct module;
55989
55990-#ifdef CONFIG_KALLSYMS
55991+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
55992+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55993 /* Lookup the address for a symbol. Returns 0 if not found. */
55994 unsigned long kallsyms_lookup_name(const char *name);
55995
55996@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(un
55997 /* Stupid that this does nothing, but I didn't create this mess. */
55998 #define __print_symbol(fmt, addr)
55999 #endif /*CONFIG_KALLSYMS*/
56000+#else /* when included by kallsyms.c, vsnprintf.c, or
56001+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
56002+extern void __print_symbol(const char *fmt, unsigned long address);
56003+extern int sprint_backtrace(char *buffer, unsigned long address);
56004+extern int sprint_symbol(char *buffer, unsigned long address);
56005+const char *kallsyms_lookup(unsigned long addr,
56006+ unsigned long *symbolsize,
56007+ unsigned long *offset,
56008+ char **modname, char *namebuf);
56009+#endif
56010
56011 /* This macro allows us to keep printk typechecking */
56012 static void __check_printsym_format(const char *fmt, ...)
56013diff -urNp linux-3.0.4/include/linux/kgdb.h linux-3.0.4/include/linux/kgdb.h
56014--- linux-3.0.4/include/linux/kgdb.h 2011-07-21 22:17:23.000000000 -0400
56015+++ linux-3.0.4/include/linux/kgdb.h 2011-08-26 19:49:56.000000000 -0400
56016@@ -53,7 +53,7 @@ extern int kgdb_connected;
56017 extern int kgdb_io_module_registered;
56018
56019 extern atomic_t kgdb_setting_breakpoint;
56020-extern atomic_t kgdb_cpu_doing_single_step;
56021+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
56022
56023 extern struct task_struct *kgdb_usethread;
56024 extern struct task_struct *kgdb_contthread;
56025@@ -251,7 +251,7 @@ struct kgdb_arch {
56026 void (*disable_hw_break)(struct pt_regs *regs);
56027 void (*remove_all_hw_break)(void);
56028 void (*correct_hw_break)(void);
56029-};
56030+} __do_const;
56031
56032 /**
56033 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
56034@@ -276,7 +276,7 @@ struct kgdb_io {
56035 void (*pre_exception) (void);
56036 void (*post_exception) (void);
56037 int is_console;
56038-};
56039+} __do_const;
56040
56041 extern struct kgdb_arch arch_kgdb_ops;
56042
56043diff -urNp linux-3.0.4/include/linux/kmod.h linux-3.0.4/include/linux/kmod.h
56044--- linux-3.0.4/include/linux/kmod.h 2011-07-21 22:17:23.000000000 -0400
56045+++ linux-3.0.4/include/linux/kmod.h 2011-08-23 21:48:14.000000000 -0400
56046@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysc
56047 * usually useless though. */
56048 extern int __request_module(bool wait, const char *name, ...) \
56049 __attribute__((format(printf, 2, 3)));
56050+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
56051+ __attribute__((format(printf, 3, 4)));
56052 #define request_module(mod...) __request_module(true, mod)
56053 #define request_module_nowait(mod...) __request_module(false, mod)
56054 #define try_then_request_module(x, mod...) \
56055diff -urNp linux-3.0.4/include/linux/kvm_host.h linux-3.0.4/include/linux/kvm_host.h
56056--- linux-3.0.4/include/linux/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
56057+++ linux-3.0.4/include/linux/kvm_host.h 2011-08-23 21:47:56.000000000 -0400
56058@@ -307,7 +307,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
56059 void vcpu_load(struct kvm_vcpu *vcpu);
56060 void vcpu_put(struct kvm_vcpu *vcpu);
56061
56062-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
56063+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
56064 struct module *module);
56065 void kvm_exit(void);
56066
56067@@ -446,7 +446,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
56068 struct kvm_guest_debug *dbg);
56069 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
56070
56071-int kvm_arch_init(void *opaque);
56072+int kvm_arch_init(const void *opaque);
56073 void kvm_arch_exit(void);
56074
56075 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
56076diff -urNp linux-3.0.4/include/linux/libata.h linux-3.0.4/include/linux/libata.h
56077--- linux-3.0.4/include/linux/libata.h 2011-07-21 22:17:23.000000000 -0400
56078+++ linux-3.0.4/include/linux/libata.h 2011-08-26 19:49:56.000000000 -0400
56079@@ -899,7 +899,7 @@ struct ata_port_operations {
56080 * fields must be pointers.
56081 */
56082 const struct ata_port_operations *inherits;
56083-};
56084+} __do_const;
56085
56086 struct ata_port_info {
56087 unsigned long flags;
56088diff -urNp linux-3.0.4/include/linux/mca.h linux-3.0.4/include/linux/mca.h
56089--- linux-3.0.4/include/linux/mca.h 2011-07-21 22:17:23.000000000 -0400
56090+++ linux-3.0.4/include/linux/mca.h 2011-08-23 21:47:56.000000000 -0400
56091@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
56092 int region);
56093 void * (*mca_transform_memory)(struct mca_device *,
56094 void *memory);
56095-};
56096+} __no_const;
56097
56098 struct mca_bus {
56099 u64 default_dma_mask;
56100diff -urNp linux-3.0.4/include/linux/memory.h linux-3.0.4/include/linux/memory.h
56101--- linux-3.0.4/include/linux/memory.h 2011-07-21 22:17:23.000000000 -0400
56102+++ linux-3.0.4/include/linux/memory.h 2011-08-23 21:47:56.000000000 -0400
56103@@ -144,7 +144,7 @@ struct memory_accessor {
56104 size_t count);
56105 ssize_t (*write)(struct memory_accessor *, const char *buf,
56106 off_t offset, size_t count);
56107-};
56108+} __no_const;
56109
56110 /*
56111 * Kernel text modification mutex, used for code patching. Users of this lock
56112diff -urNp linux-3.0.4/include/linux/mfd/abx500.h linux-3.0.4/include/linux/mfd/abx500.h
56113--- linux-3.0.4/include/linux/mfd/abx500.h 2011-07-21 22:17:23.000000000 -0400
56114+++ linux-3.0.4/include/linux/mfd/abx500.h 2011-08-23 21:47:56.000000000 -0400
56115@@ -234,6 +234,7 @@ struct abx500_ops {
56116 int (*event_registers_startup_state_get) (struct device *, u8 *);
56117 int (*startup_irq_enabled) (struct device *, unsigned int);
56118 };
56119+typedef struct abx500_ops __no_const abx500_ops_no_const;
56120
56121 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
56122 void abx500_remove_ops(struct device *dev);
56123diff -urNp linux-3.0.4/include/linux/mm.h linux-3.0.4/include/linux/mm.h
56124--- linux-3.0.4/include/linux/mm.h 2011-09-02 18:11:21.000000000 -0400
56125+++ linux-3.0.4/include/linux/mm.h 2011-08-23 21:47:56.000000000 -0400
56126@@ -113,7 +113,14 @@ extern unsigned int kobjsize(const void
56127
56128 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
56129 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
56130+
56131+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
56132+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
56133+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
56134+#else
56135 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
56136+#endif
56137+
56138 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
56139 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
56140
56141@@ -1009,34 +1016,6 @@ int set_page_dirty(struct page *page);
56142 int set_page_dirty_lock(struct page *page);
56143 int clear_page_dirty_for_io(struct page *page);
56144
56145-/* Is the vma a continuation of the stack vma above it? */
56146-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
56147-{
56148- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
56149-}
56150-
56151-static inline int stack_guard_page_start(struct vm_area_struct *vma,
56152- unsigned long addr)
56153-{
56154- return (vma->vm_flags & VM_GROWSDOWN) &&
56155- (vma->vm_start == addr) &&
56156- !vma_growsdown(vma->vm_prev, addr);
56157-}
56158-
56159-/* Is the vma a continuation of the stack vma below it? */
56160-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
56161-{
56162- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
56163-}
56164-
56165-static inline int stack_guard_page_end(struct vm_area_struct *vma,
56166- unsigned long addr)
56167-{
56168- return (vma->vm_flags & VM_GROWSUP) &&
56169- (vma->vm_end == addr) &&
56170- !vma_growsup(vma->vm_next, addr);
56171-}
56172-
56173 extern unsigned long move_page_tables(struct vm_area_struct *vma,
56174 unsigned long old_addr, struct vm_area_struct *new_vma,
56175 unsigned long new_addr, unsigned long len);
56176@@ -1169,6 +1148,15 @@ struct shrinker {
56177 extern void register_shrinker(struct shrinker *);
56178 extern void unregister_shrinker(struct shrinker *);
56179
56180+#ifdef CONFIG_MMU
56181+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
56182+#else
56183+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
56184+{
56185+ return __pgprot(0);
56186+}
56187+#endif
56188+
56189 int vma_wants_writenotify(struct vm_area_struct *vma);
56190
56191 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
56192@@ -1452,6 +1440,7 @@ out:
56193 }
56194
56195 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
56196+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
56197
56198 extern unsigned long do_brk(unsigned long, unsigned long);
56199
56200@@ -1510,6 +1499,10 @@ extern struct vm_area_struct * find_vma(
56201 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
56202 struct vm_area_struct **pprev);
56203
56204+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
56205+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
56206+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
56207+
56208 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
56209 NULL if none. Assume start_addr < end_addr. */
56210 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
56211@@ -1526,15 +1519,6 @@ static inline unsigned long vma_pages(st
56212 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
56213 }
56214
56215-#ifdef CONFIG_MMU
56216-pgprot_t vm_get_page_prot(unsigned long vm_flags);
56217-#else
56218-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
56219-{
56220- return __pgprot(0);
56221-}
56222-#endif
56223-
56224 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
56225 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
56226 unsigned long pfn, unsigned long size, pgprot_t);
56227@@ -1647,7 +1631,7 @@ extern int unpoison_memory(unsigned long
56228 extern int sysctl_memory_failure_early_kill;
56229 extern int sysctl_memory_failure_recovery;
56230 extern void shake_page(struct page *p, int access);
56231-extern atomic_long_t mce_bad_pages;
56232+extern atomic_long_unchecked_t mce_bad_pages;
56233 extern int soft_offline_page(struct page *page, int flags);
56234
56235 extern void dump_page(struct page *page);
56236@@ -1661,5 +1645,11 @@ extern void copy_user_huge_page(struct p
56237 unsigned int pages_per_huge_page);
56238 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
56239
56240+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
56241+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
56242+#else
56243+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
56244+#endif
56245+
56246 #endif /* __KERNEL__ */
56247 #endif /* _LINUX_MM_H */
56248diff -urNp linux-3.0.4/include/linux/mm_types.h linux-3.0.4/include/linux/mm_types.h
56249--- linux-3.0.4/include/linux/mm_types.h 2011-07-21 22:17:23.000000000 -0400
56250+++ linux-3.0.4/include/linux/mm_types.h 2011-08-23 21:47:56.000000000 -0400
56251@@ -184,6 +184,8 @@ struct vm_area_struct {
56252 #ifdef CONFIG_NUMA
56253 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
56254 #endif
56255+
56256+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
56257 };
56258
56259 struct core_thread {
56260@@ -316,6 +318,24 @@ struct mm_struct {
56261 #ifdef CONFIG_CPUMASK_OFFSTACK
56262 struct cpumask cpumask_allocation;
56263 #endif
56264+
56265+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
56266+ unsigned long pax_flags;
56267+#endif
56268+
56269+#ifdef CONFIG_PAX_DLRESOLVE
56270+ unsigned long call_dl_resolve;
56271+#endif
56272+
56273+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
56274+ unsigned long call_syscall;
56275+#endif
56276+
56277+#ifdef CONFIG_PAX_ASLR
56278+ unsigned long delta_mmap; /* randomized offset */
56279+ unsigned long delta_stack; /* randomized offset */
56280+#endif
56281+
56282 };
56283
56284 static inline void mm_init_cpumask(struct mm_struct *mm)
56285diff -urNp linux-3.0.4/include/linux/mmu_notifier.h linux-3.0.4/include/linux/mmu_notifier.h
56286--- linux-3.0.4/include/linux/mmu_notifier.h 2011-07-21 22:17:23.000000000 -0400
56287+++ linux-3.0.4/include/linux/mmu_notifier.h 2011-08-23 21:47:56.000000000 -0400
56288@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destr
56289 */
56290 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
56291 ({ \
56292- pte_t __pte; \
56293+ pte_t ___pte; \
56294 struct vm_area_struct *___vma = __vma; \
56295 unsigned long ___address = __address; \
56296- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
56297+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
56298 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
56299- __pte; \
56300+ ___pte; \
56301 })
56302
56303 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
56304diff -urNp linux-3.0.4/include/linux/mmzone.h linux-3.0.4/include/linux/mmzone.h
56305--- linux-3.0.4/include/linux/mmzone.h 2011-07-21 22:17:23.000000000 -0400
56306+++ linux-3.0.4/include/linux/mmzone.h 2011-08-23 21:47:56.000000000 -0400
56307@@ -350,7 +350,7 @@ struct zone {
56308 unsigned long flags; /* zone flags, see below */
56309
56310 /* Zone statistics */
56311- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
56312+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
56313
56314 /*
56315 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
56316diff -urNp linux-3.0.4/include/linux/mod_devicetable.h linux-3.0.4/include/linux/mod_devicetable.h
56317--- linux-3.0.4/include/linux/mod_devicetable.h 2011-07-21 22:17:23.000000000 -0400
56318+++ linux-3.0.4/include/linux/mod_devicetable.h 2011-08-23 21:47:56.000000000 -0400
56319@@ -12,7 +12,7 @@
56320 typedef unsigned long kernel_ulong_t;
56321 #endif
56322
56323-#define PCI_ANY_ID (~0)
56324+#define PCI_ANY_ID ((__u16)~0)
56325
56326 struct pci_device_id {
56327 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
56328@@ -131,7 +131,7 @@ struct usb_device_id {
56329 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
56330 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
56331
56332-#define HID_ANY_ID (~0)
56333+#define HID_ANY_ID (~0U)
56334
56335 struct hid_device_id {
56336 __u16 bus;
56337diff -urNp linux-3.0.4/include/linux/module.h linux-3.0.4/include/linux/module.h
56338--- linux-3.0.4/include/linux/module.h 2011-07-21 22:17:23.000000000 -0400
56339+++ linux-3.0.4/include/linux/module.h 2011-08-23 21:47:56.000000000 -0400
56340@@ -16,6 +16,7 @@
56341 #include <linux/kobject.h>
56342 #include <linux/moduleparam.h>
56343 #include <linux/tracepoint.h>
56344+#include <linux/fs.h>
56345
56346 #include <linux/percpu.h>
56347 #include <asm/module.h>
56348@@ -325,19 +326,16 @@ struct module
56349 int (*init)(void);
56350
56351 /* If this is non-NULL, vfree after init() returns */
56352- void *module_init;
56353+ void *module_init_rx, *module_init_rw;
56354
56355 /* Here is the actual code + data, vfree'd on unload. */
56356- void *module_core;
56357+ void *module_core_rx, *module_core_rw;
56358
56359 /* Here are the sizes of the init and core sections */
56360- unsigned int init_size, core_size;
56361+ unsigned int init_size_rw, core_size_rw;
56362
56363 /* The size of the executable code in each section. */
56364- unsigned int init_text_size, core_text_size;
56365-
56366- /* Size of RO sections of the module (text+rodata) */
56367- unsigned int init_ro_size, core_ro_size;
56368+ unsigned int init_size_rx, core_size_rx;
56369
56370 /* Arch-specific module values */
56371 struct mod_arch_specific arch;
56372@@ -393,6 +391,10 @@ struct module
56373 #ifdef CONFIG_EVENT_TRACING
56374 struct ftrace_event_call **trace_events;
56375 unsigned int num_trace_events;
56376+ struct file_operations trace_id;
56377+ struct file_operations trace_enable;
56378+ struct file_operations trace_format;
56379+ struct file_operations trace_filter;
56380 #endif
56381 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
56382 unsigned int num_ftrace_callsites;
56383@@ -443,16 +445,46 @@ bool is_module_address(unsigned long add
56384 bool is_module_percpu_address(unsigned long addr);
56385 bool is_module_text_address(unsigned long addr);
56386
56387+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
56388+{
56389+
56390+#ifdef CONFIG_PAX_KERNEXEC
56391+ if (ktla_ktva(addr) >= (unsigned long)start &&
56392+ ktla_ktva(addr) < (unsigned long)start + size)
56393+ return 1;
56394+#endif
56395+
56396+ return ((void *)addr >= start && (void *)addr < start + size);
56397+}
56398+
56399+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
56400+{
56401+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
56402+}
56403+
56404+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
56405+{
56406+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
56407+}
56408+
56409+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
56410+{
56411+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
56412+}
56413+
56414+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
56415+{
56416+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
56417+}
56418+
56419 static inline int within_module_core(unsigned long addr, struct module *mod)
56420 {
56421- return (unsigned long)mod->module_core <= addr &&
56422- addr < (unsigned long)mod->module_core + mod->core_size;
56423+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
56424 }
56425
56426 static inline int within_module_init(unsigned long addr, struct module *mod)
56427 {
56428- return (unsigned long)mod->module_init <= addr &&
56429- addr < (unsigned long)mod->module_init + mod->init_size;
56430+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
56431 }
56432
56433 /* Search for module by name: must hold module_mutex. */
56434diff -urNp linux-3.0.4/include/linux/moduleloader.h linux-3.0.4/include/linux/moduleloader.h
56435--- linux-3.0.4/include/linux/moduleloader.h 2011-07-21 22:17:23.000000000 -0400
56436+++ linux-3.0.4/include/linux/moduleloader.h 2011-08-23 21:47:56.000000000 -0400
56437@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
56438 sections. Returns NULL on failure. */
56439 void *module_alloc(unsigned long size);
56440
56441+#ifdef CONFIG_PAX_KERNEXEC
56442+void *module_alloc_exec(unsigned long size);
56443+#else
56444+#define module_alloc_exec(x) module_alloc(x)
56445+#endif
56446+
56447 /* Free memory returned from module_alloc. */
56448 void module_free(struct module *mod, void *module_region);
56449
56450+#ifdef CONFIG_PAX_KERNEXEC
56451+void module_free_exec(struct module *mod, void *module_region);
56452+#else
56453+#define module_free_exec(x, y) module_free((x), (y))
56454+#endif
56455+
56456 /* Apply the given relocation to the (simplified) ELF. Return -error
56457 or 0. */
56458 int apply_relocate(Elf_Shdr *sechdrs,
56459diff -urNp linux-3.0.4/include/linux/moduleparam.h linux-3.0.4/include/linux/moduleparam.h
56460--- linux-3.0.4/include/linux/moduleparam.h 2011-07-21 22:17:23.000000000 -0400
56461+++ linux-3.0.4/include/linux/moduleparam.h 2011-08-23 21:47:56.000000000 -0400
56462@@ -255,7 +255,7 @@ static inline void __kernel_param_unlock
56463 * @len is usually just sizeof(string).
56464 */
56465 #define module_param_string(name, string, len, perm) \
56466- static const struct kparam_string __param_string_##name \
56467+ static const struct kparam_string __param_string_##name __used \
56468 = { len, string }; \
56469 __module_param_call(MODULE_PARAM_PREFIX, name, \
56470 &param_ops_string, \
56471@@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffe
56472 * module_param_named() for why this might be necessary.
56473 */
56474 #define module_param_array_named(name, array, type, nump, perm) \
56475- static const struct kparam_array __param_arr_##name \
56476+ static const struct kparam_array __param_arr_##name __used \
56477 = { .max = ARRAY_SIZE(array), .num = nump, \
56478 .ops = &param_ops_##type, \
56479 .elemsize = sizeof(array[0]), .elem = array }; \
56480diff -urNp linux-3.0.4/include/linux/namei.h linux-3.0.4/include/linux/namei.h
56481--- linux-3.0.4/include/linux/namei.h 2011-07-21 22:17:23.000000000 -0400
56482+++ linux-3.0.4/include/linux/namei.h 2011-08-23 21:47:56.000000000 -0400
56483@@ -24,7 +24,7 @@ struct nameidata {
56484 unsigned seq;
56485 int last_type;
56486 unsigned depth;
56487- char *saved_names[MAX_NESTED_LINKS + 1];
56488+ const char *saved_names[MAX_NESTED_LINKS + 1];
56489
56490 /* Intent data */
56491 union {
56492@@ -91,12 +91,12 @@ extern int follow_up(struct path *);
56493 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
56494 extern void unlock_rename(struct dentry *, struct dentry *);
56495
56496-static inline void nd_set_link(struct nameidata *nd, char *path)
56497+static inline void nd_set_link(struct nameidata *nd, const char *path)
56498 {
56499 nd->saved_names[nd->depth] = path;
56500 }
56501
56502-static inline char *nd_get_link(struct nameidata *nd)
56503+static inline const char *nd_get_link(const struct nameidata *nd)
56504 {
56505 return nd->saved_names[nd->depth];
56506 }
56507diff -urNp linux-3.0.4/include/linux/netdevice.h linux-3.0.4/include/linux/netdevice.h
56508--- linux-3.0.4/include/linux/netdevice.h 2011-09-02 18:11:21.000000000 -0400
56509+++ linux-3.0.4/include/linux/netdevice.h 2011-08-23 21:47:56.000000000 -0400
56510@@ -979,6 +979,7 @@ struct net_device_ops {
56511 int (*ndo_set_features)(struct net_device *dev,
56512 u32 features);
56513 };
56514+typedef struct net_device_ops __no_const net_device_ops_no_const;
56515
56516 /*
56517 * The DEVICE structure.
56518diff -urNp linux-3.0.4/include/linux/netfilter/xt_gradm.h linux-3.0.4/include/linux/netfilter/xt_gradm.h
56519--- linux-3.0.4/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
56520+++ linux-3.0.4/include/linux/netfilter/xt_gradm.h 2011-08-23 21:48:14.000000000 -0400
56521@@ -0,0 +1,9 @@
56522+#ifndef _LINUX_NETFILTER_XT_GRADM_H
56523+#define _LINUX_NETFILTER_XT_GRADM_H 1
56524+
56525+struct xt_gradm_mtinfo {
56526+ __u16 flags;
56527+ __u16 invflags;
56528+};
56529+
56530+#endif
56531diff -urNp linux-3.0.4/include/linux/of_pdt.h linux-3.0.4/include/linux/of_pdt.h
56532--- linux-3.0.4/include/linux/of_pdt.h 2011-07-21 22:17:23.000000000 -0400
56533+++ linux-3.0.4/include/linux/of_pdt.h 2011-08-30 06:20:11.000000000 -0400
56534@@ -32,7 +32,7 @@ struct of_pdt_ops {
56535
56536 /* return 0 on success; fill in 'len' with number of bytes in path */
56537 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
56538-};
56539+} __no_const;
56540
56541 extern void *prom_early_alloc(unsigned long size);
56542
56543diff -urNp linux-3.0.4/include/linux/oprofile.h linux-3.0.4/include/linux/oprofile.h
56544--- linux-3.0.4/include/linux/oprofile.h 2011-07-21 22:17:23.000000000 -0400
56545+++ linux-3.0.4/include/linux/oprofile.h 2011-08-23 21:47:56.000000000 -0400
56546@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super
56547 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
56548 char const * name, ulong * val);
56549
56550-/** Create a file for read-only access to an atomic_t. */
56551+/** Create a file for read-only access to an atomic_unchecked_t. */
56552 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
56553- char const * name, atomic_t * val);
56554+ char const * name, atomic_unchecked_t * val);
56555
56556 /** create a directory */
56557 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
56558diff -urNp linux-3.0.4/include/linux/padata.h linux-3.0.4/include/linux/padata.h
56559--- linux-3.0.4/include/linux/padata.h 2011-07-21 22:17:23.000000000 -0400
56560+++ linux-3.0.4/include/linux/padata.h 2011-08-23 21:47:56.000000000 -0400
56561@@ -129,7 +129,7 @@ struct parallel_data {
56562 struct padata_instance *pinst;
56563 struct padata_parallel_queue __percpu *pqueue;
56564 struct padata_serial_queue __percpu *squeue;
56565- atomic_t seq_nr;
56566+ atomic_unchecked_t seq_nr;
56567 atomic_t reorder_objects;
56568 atomic_t refcnt;
56569 unsigned int max_seq_nr;
56570diff -urNp linux-3.0.4/include/linux/perf_event.h linux-3.0.4/include/linux/perf_event.h
56571--- linux-3.0.4/include/linux/perf_event.h 2011-07-21 22:17:23.000000000 -0400
56572+++ linux-3.0.4/include/linux/perf_event.h 2011-08-23 21:47:56.000000000 -0400
56573@@ -761,8 +761,8 @@ struct perf_event {
56574
56575 enum perf_event_active_state state;
56576 unsigned int attach_state;
56577- local64_t count;
56578- atomic64_t child_count;
56579+ local64_t count; /* PaX: fix it one day */
56580+ atomic64_unchecked_t child_count;
56581
56582 /*
56583 * These are the total time in nanoseconds that the event
56584@@ -813,8 +813,8 @@ struct perf_event {
56585 * These accumulate total time (in nanoseconds) that children
56586 * events have been enabled and running, respectively.
56587 */
56588- atomic64_t child_total_time_enabled;
56589- atomic64_t child_total_time_running;
56590+ atomic64_unchecked_t child_total_time_enabled;
56591+ atomic64_unchecked_t child_total_time_running;
56592
56593 /*
56594 * Protect attach/detach and child_list:
56595diff -urNp linux-3.0.4/include/linux/pipe_fs_i.h linux-3.0.4/include/linux/pipe_fs_i.h
56596--- linux-3.0.4/include/linux/pipe_fs_i.h 2011-07-21 22:17:23.000000000 -0400
56597+++ linux-3.0.4/include/linux/pipe_fs_i.h 2011-08-23 21:47:56.000000000 -0400
56598@@ -46,9 +46,9 @@ struct pipe_buffer {
56599 struct pipe_inode_info {
56600 wait_queue_head_t wait;
56601 unsigned int nrbufs, curbuf, buffers;
56602- unsigned int readers;
56603- unsigned int writers;
56604- unsigned int waiting_writers;
56605+ atomic_t readers;
56606+ atomic_t writers;
56607+ atomic_t waiting_writers;
56608 unsigned int r_counter;
56609 unsigned int w_counter;
56610 struct page *tmp_page;
56611diff -urNp linux-3.0.4/include/linux/pm_runtime.h linux-3.0.4/include/linux/pm_runtime.h
56612--- linux-3.0.4/include/linux/pm_runtime.h 2011-07-21 22:17:23.000000000 -0400
56613+++ linux-3.0.4/include/linux/pm_runtime.h 2011-08-23 21:47:56.000000000 -0400
56614@@ -94,7 +94,7 @@ static inline bool pm_runtime_callbacks_
56615
56616 static inline void pm_runtime_mark_last_busy(struct device *dev)
56617 {
56618- ACCESS_ONCE(dev->power.last_busy) = jiffies;
56619+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
56620 }
56621
56622 #else /* !CONFIG_PM_RUNTIME */
56623diff -urNp linux-3.0.4/include/linux/poison.h linux-3.0.4/include/linux/poison.h
56624--- linux-3.0.4/include/linux/poison.h 2011-07-21 22:17:23.000000000 -0400
56625+++ linux-3.0.4/include/linux/poison.h 2011-08-23 21:47:56.000000000 -0400
56626@@ -19,8 +19,8 @@
56627 * under normal circumstances, used to verify that nobody uses
56628 * non-initialized list entries.
56629 */
56630-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
56631-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
56632+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
56633+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
56634
56635 /********** include/linux/timer.h **********/
56636 /*
56637diff -urNp linux-3.0.4/include/linux/preempt.h linux-3.0.4/include/linux/preempt.h
56638--- linux-3.0.4/include/linux/preempt.h 2011-07-21 22:17:23.000000000 -0400
56639+++ linux-3.0.4/include/linux/preempt.h 2011-08-23 21:47:56.000000000 -0400
56640@@ -115,7 +115,7 @@ struct preempt_ops {
56641 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
56642 void (*sched_out)(struct preempt_notifier *notifier,
56643 struct task_struct *next);
56644-};
56645+} __no_const;
56646
56647 /**
56648 * preempt_notifier - key for installing preemption notifiers
56649diff -urNp linux-3.0.4/include/linux/proc_fs.h linux-3.0.4/include/linux/proc_fs.h
56650--- linux-3.0.4/include/linux/proc_fs.h 2011-07-21 22:17:23.000000000 -0400
56651+++ linux-3.0.4/include/linux/proc_fs.h 2011-08-23 21:48:14.000000000 -0400
56652@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
56653 return proc_create_data(name, mode, parent, proc_fops, NULL);
56654 }
56655
56656+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
56657+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
56658+{
56659+#ifdef CONFIG_GRKERNSEC_PROC_USER
56660+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
56661+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56662+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
56663+#else
56664+ return proc_create_data(name, mode, parent, proc_fops, NULL);
56665+#endif
56666+}
56667+
56668+
56669 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
56670 mode_t mode, struct proc_dir_entry *base,
56671 read_proc_t *read_proc, void * data)
56672@@ -258,7 +271,7 @@ union proc_op {
56673 int (*proc_show)(struct seq_file *m,
56674 struct pid_namespace *ns, struct pid *pid,
56675 struct task_struct *task);
56676-};
56677+} __no_const;
56678
56679 struct ctl_table_header;
56680 struct ctl_table;
56681diff -urNp linux-3.0.4/include/linux/ptrace.h linux-3.0.4/include/linux/ptrace.h
56682--- linux-3.0.4/include/linux/ptrace.h 2011-07-21 22:17:23.000000000 -0400
56683+++ linux-3.0.4/include/linux/ptrace.h 2011-08-23 21:48:14.000000000 -0400
56684@@ -115,10 +115,10 @@ extern void __ptrace_unlink(struct task_
56685 extern void exit_ptrace(struct task_struct *tracer);
56686 #define PTRACE_MODE_READ 1
56687 #define PTRACE_MODE_ATTACH 2
56688-/* Returns 0 on success, -errno on denial. */
56689-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
56690 /* Returns true on success, false on denial. */
56691 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
56692+/* Returns true on success, false on denial. */
56693+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
56694
56695 static inline int ptrace_reparented(struct task_struct *child)
56696 {
56697diff -urNp linux-3.0.4/include/linux/random.h linux-3.0.4/include/linux/random.h
56698--- linux-3.0.4/include/linux/random.h 2011-09-02 18:11:21.000000000 -0400
56699+++ linux-3.0.4/include/linux/random.h 2011-08-23 21:47:56.000000000 -0400
56700@@ -69,12 +69,17 @@ void srandom32(u32 seed);
56701
56702 u32 prandom32(struct rnd_state *);
56703
56704+static inline unsigned long pax_get_random_long(void)
56705+{
56706+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
56707+}
56708+
56709 /*
56710 * Handle minimum values for seeds
56711 */
56712 static inline u32 __seed(u32 x, u32 m)
56713 {
56714- return (x < m) ? x + m : x;
56715+ return (x <= m) ? x + m + 1 : x;
56716 }
56717
56718 /**
56719diff -urNp linux-3.0.4/include/linux/reboot.h linux-3.0.4/include/linux/reboot.h
56720--- linux-3.0.4/include/linux/reboot.h 2011-07-21 22:17:23.000000000 -0400
56721+++ linux-3.0.4/include/linux/reboot.h 2011-08-23 21:47:56.000000000 -0400
56722@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
56723 * Architecture-specific implementations of sys_reboot commands.
56724 */
56725
56726-extern void machine_restart(char *cmd);
56727-extern void machine_halt(void);
56728-extern void machine_power_off(void);
56729+extern void machine_restart(char *cmd) __noreturn;
56730+extern void machine_halt(void) __noreturn;
56731+extern void machine_power_off(void) __noreturn;
56732
56733 extern void machine_shutdown(void);
56734 struct pt_regs;
56735@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
56736 */
56737
56738 extern void kernel_restart_prepare(char *cmd);
56739-extern void kernel_restart(char *cmd);
56740-extern void kernel_halt(void);
56741-extern void kernel_power_off(void);
56742+extern void kernel_restart(char *cmd) __noreturn;
56743+extern void kernel_halt(void) __noreturn;
56744+extern void kernel_power_off(void) __noreturn;
56745
56746 extern int C_A_D; /* for sysctl */
56747 void ctrl_alt_del(void);
56748@@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
56749 * Emergency restart, callable from an interrupt handler.
56750 */
56751
56752-extern void emergency_restart(void);
56753+extern void emergency_restart(void) __noreturn;
56754 #include <asm/emergency-restart.h>
56755
56756 #endif
56757diff -urNp linux-3.0.4/include/linux/reiserfs_fs.h linux-3.0.4/include/linux/reiserfs_fs.h
56758--- linux-3.0.4/include/linux/reiserfs_fs.h 2011-07-21 22:17:23.000000000 -0400
56759+++ linux-3.0.4/include/linux/reiserfs_fs.h 2011-08-23 21:47:56.000000000 -0400
56760@@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset
56761 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
56762
56763 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
56764-#define get_generation(s) atomic_read (&fs_generation(s))
56765+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
56766 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
56767 #define __fs_changed(gen,s) (gen != get_generation (s))
56768 #define fs_changed(gen,s) \
56769diff -urNp linux-3.0.4/include/linux/reiserfs_fs_sb.h linux-3.0.4/include/linux/reiserfs_fs_sb.h
56770--- linux-3.0.4/include/linux/reiserfs_fs_sb.h 2011-07-21 22:17:23.000000000 -0400
56771+++ linux-3.0.4/include/linux/reiserfs_fs_sb.h 2011-08-23 21:47:56.000000000 -0400
56772@@ -386,7 +386,7 @@ struct reiserfs_sb_info {
56773 /* Comment? -Hans */
56774 wait_queue_head_t s_wait;
56775 /* To be obsoleted soon by per buffer seals.. -Hans */
56776- atomic_t s_generation_counter; // increased by one every time the
56777+ atomic_unchecked_t s_generation_counter; // increased by one every time the
56778 // tree gets re-balanced
56779 unsigned long s_properties; /* File system properties. Currently holds
56780 on-disk FS format */
56781diff -urNp linux-3.0.4/include/linux/relay.h linux-3.0.4/include/linux/relay.h
56782--- linux-3.0.4/include/linux/relay.h 2011-07-21 22:17:23.000000000 -0400
56783+++ linux-3.0.4/include/linux/relay.h 2011-08-23 21:47:56.000000000 -0400
56784@@ -159,7 +159,7 @@ struct rchan_callbacks
56785 * The callback should return 0 if successful, negative if not.
56786 */
56787 int (*remove_buf_file)(struct dentry *dentry);
56788-};
56789+} __no_const;
56790
56791 /*
56792 * CONFIG_RELAY kernel API, kernel/relay.c
56793diff -urNp linux-3.0.4/include/linux/rfkill.h linux-3.0.4/include/linux/rfkill.h
56794--- linux-3.0.4/include/linux/rfkill.h 2011-07-21 22:17:23.000000000 -0400
56795+++ linux-3.0.4/include/linux/rfkill.h 2011-08-23 21:47:56.000000000 -0400
56796@@ -147,6 +147,7 @@ struct rfkill_ops {
56797 void (*query)(struct rfkill *rfkill, void *data);
56798 int (*set_block)(void *data, bool blocked);
56799 };
56800+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
56801
56802 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
56803 /**
56804diff -urNp linux-3.0.4/include/linux/rmap.h linux-3.0.4/include/linux/rmap.h
56805--- linux-3.0.4/include/linux/rmap.h 2011-07-21 22:17:23.000000000 -0400
56806+++ linux-3.0.4/include/linux/rmap.h 2011-08-23 21:47:56.000000000 -0400
56807@@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struc
56808 void anon_vma_init(void); /* create anon_vma_cachep */
56809 int anon_vma_prepare(struct vm_area_struct *);
56810 void unlink_anon_vmas(struct vm_area_struct *);
56811-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
56812-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
56813+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
56814+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
56815 void __anon_vma_link(struct vm_area_struct *);
56816
56817 static inline void anon_vma_merge(struct vm_area_struct *vma,
56818diff -urNp linux-3.0.4/include/linux/sched.h linux-3.0.4/include/linux/sched.h
56819--- linux-3.0.4/include/linux/sched.h 2011-07-21 22:17:23.000000000 -0400
56820+++ linux-3.0.4/include/linux/sched.h 2011-08-25 17:22:27.000000000 -0400
56821@@ -100,6 +100,7 @@ struct bio_list;
56822 struct fs_struct;
56823 struct perf_event_context;
56824 struct blk_plug;
56825+struct linux_binprm;
56826
56827 /*
56828 * List of flags we want to share for kernel threads,
56829@@ -380,10 +381,13 @@ struct user_namespace;
56830 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
56831
56832 extern int sysctl_max_map_count;
56833+extern unsigned long sysctl_heap_stack_gap;
56834
56835 #include <linux/aio.h>
56836
56837 #ifdef CONFIG_MMU
56838+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
56839+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
56840 extern void arch_pick_mmap_layout(struct mm_struct *mm);
56841 extern unsigned long
56842 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
56843@@ -629,6 +633,17 @@ struct signal_struct {
56844 #ifdef CONFIG_TASKSTATS
56845 struct taskstats *stats;
56846 #endif
56847+
56848+#ifdef CONFIG_GRKERNSEC
56849+ u32 curr_ip;
56850+ u32 saved_ip;
56851+ u32 gr_saddr;
56852+ u32 gr_daddr;
56853+ u16 gr_sport;
56854+ u16 gr_dport;
56855+ u8 used_accept:1;
56856+#endif
56857+
56858 #ifdef CONFIG_AUDIT
56859 unsigned audit_tty;
56860 struct tty_audit_buf *tty_audit_buf;
56861@@ -710,6 +725,11 @@ struct user_struct {
56862 struct key *session_keyring; /* UID's default session keyring */
56863 #endif
56864
56865+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56866+ unsigned int banned;
56867+ unsigned long ban_expires;
56868+#endif
56869+
56870 /* Hash table maintenance information */
56871 struct hlist_node uidhash_node;
56872 uid_t uid;
56873@@ -1340,8 +1360,8 @@ struct task_struct {
56874 struct list_head thread_group;
56875
56876 struct completion *vfork_done; /* for vfork() */
56877- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
56878- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
56879+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
56880+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
56881
56882 cputime_t utime, stime, utimescaled, stimescaled;
56883 cputime_t gtime;
56884@@ -1357,13 +1377,6 @@ struct task_struct {
56885 struct task_cputime cputime_expires;
56886 struct list_head cpu_timers[3];
56887
56888-/* process credentials */
56889- const struct cred __rcu *real_cred; /* objective and real subjective task
56890- * credentials (COW) */
56891- const struct cred __rcu *cred; /* effective (overridable) subjective task
56892- * credentials (COW) */
56893- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
56894-
56895 char comm[TASK_COMM_LEN]; /* executable name excluding path
56896 - access with [gs]et_task_comm (which lock
56897 it with task_lock())
56898@@ -1380,8 +1393,16 @@ struct task_struct {
56899 #endif
56900 /* CPU-specific state of this task */
56901 struct thread_struct thread;
56902+/* thread_info moved to task_struct */
56903+#ifdef CONFIG_X86
56904+ struct thread_info tinfo;
56905+#endif
56906 /* filesystem information */
56907 struct fs_struct *fs;
56908+
56909+ const struct cred __rcu *cred; /* effective (overridable) subjective task
56910+ * credentials (COW) */
56911+
56912 /* open file information */
56913 struct files_struct *files;
56914 /* namespaces */
56915@@ -1428,6 +1449,11 @@ struct task_struct {
56916 struct rt_mutex_waiter *pi_blocked_on;
56917 #endif
56918
56919+/* process credentials */
56920+ const struct cred __rcu *real_cred; /* objective and real subjective task
56921+ * credentials (COW) */
56922+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
56923+
56924 #ifdef CONFIG_DEBUG_MUTEXES
56925 /* mutex deadlock detection */
56926 struct mutex_waiter *blocked_on;
56927@@ -1538,6 +1564,21 @@ struct task_struct {
56928 unsigned long default_timer_slack_ns;
56929
56930 struct list_head *scm_work_list;
56931+
56932+#ifdef CONFIG_GRKERNSEC
56933+ /* grsecurity */
56934+ struct dentry *gr_chroot_dentry;
56935+ struct acl_subject_label *acl;
56936+ struct acl_role_label *role;
56937+ struct file *exec_file;
56938+ u16 acl_role_id;
56939+ /* is this the task that authenticated to the special role */
56940+ u8 acl_sp_role;
56941+ u8 is_writable;
56942+ u8 brute;
56943+ u8 gr_is_chrooted;
56944+#endif
56945+
56946 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
56947 /* Index of current stored address in ret_stack */
56948 int curr_ret_stack;
56949@@ -1572,6 +1613,57 @@ struct task_struct {
56950 #endif
56951 };
56952
56953+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
56954+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
56955+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
56956+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
56957+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
56958+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
56959+
56960+#ifdef CONFIG_PAX_SOFTMODE
56961+extern int pax_softmode;
56962+#endif
56963+
56964+extern int pax_check_flags(unsigned long *);
56965+
56966+/* if tsk != current then task_lock must be held on it */
56967+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
56968+static inline unsigned long pax_get_flags(struct task_struct *tsk)
56969+{
56970+ if (likely(tsk->mm))
56971+ return tsk->mm->pax_flags;
56972+ else
56973+ return 0UL;
56974+}
56975+
56976+/* if tsk != current then task_lock must be held on it */
56977+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
56978+{
56979+ if (likely(tsk->mm)) {
56980+ tsk->mm->pax_flags = flags;
56981+ return 0;
56982+ }
56983+ return -EINVAL;
56984+}
56985+#endif
56986+
56987+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
56988+extern void pax_set_initial_flags(struct linux_binprm *bprm);
56989+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
56990+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
56991+#endif
56992+
56993+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
56994+extern void pax_report_insns(void *pc, void *sp);
56995+extern void pax_report_refcount_overflow(struct pt_regs *regs);
56996+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
56997+
56998+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
56999+extern void pax_track_stack(void);
57000+#else
57001+static inline void pax_track_stack(void) {}
57002+#endif
57003+
57004 /* Future-safe accessor for struct task_struct's cpus_allowed. */
57005 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
57006
57007@@ -1768,6 +1860,7 @@ extern void thread_group_times(struct ta
57008 #define PF_DUMPCORE 0x00000200 /* dumped core */
57009 #define PF_SIGNALED 0x00000400 /* killed by a signal */
57010 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
57011+#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
57012 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
57013 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
57014 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
57015@@ -2056,7 +2149,9 @@ void yield(void);
57016 extern struct exec_domain default_exec_domain;
57017
57018 union thread_union {
57019+#ifndef CONFIG_X86
57020 struct thread_info thread_info;
57021+#endif
57022 unsigned long stack[THREAD_SIZE/sizeof(long)];
57023 };
57024
57025@@ -2089,6 +2184,7 @@ extern struct pid_namespace init_pid_ns;
57026 */
57027
57028 extern struct task_struct *find_task_by_vpid(pid_t nr);
57029+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
57030 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
57031 struct pid_namespace *ns);
57032
57033@@ -2225,7 +2321,7 @@ extern void __cleanup_sighand(struct sig
57034 extern void exit_itimers(struct signal_struct *);
57035 extern void flush_itimer_signals(void);
57036
57037-extern NORET_TYPE void do_group_exit(int);
57038+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
57039
57040 extern void daemonize(const char *, ...);
57041 extern int allow_signal(int);
57042@@ -2393,13 +2489,17 @@ static inline unsigned long *end_of_stac
57043
57044 #endif
57045
57046-static inline int object_is_on_stack(void *obj)
57047+static inline int object_starts_on_stack(void *obj)
57048 {
57049- void *stack = task_stack_page(current);
57050+ const void *stack = task_stack_page(current);
57051
57052 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
57053 }
57054
57055+#ifdef CONFIG_PAX_USERCOPY
57056+extern int object_is_on_stack(const void *obj, unsigned long len);
57057+#endif
57058+
57059 extern void thread_info_cache_init(void);
57060
57061 #ifdef CONFIG_DEBUG_STACK_USAGE
57062diff -urNp linux-3.0.4/include/linux/screen_info.h linux-3.0.4/include/linux/screen_info.h
57063--- linux-3.0.4/include/linux/screen_info.h 2011-07-21 22:17:23.000000000 -0400
57064+++ linux-3.0.4/include/linux/screen_info.h 2011-08-23 21:47:56.000000000 -0400
57065@@ -43,7 +43,8 @@ struct screen_info {
57066 __u16 pages; /* 0x32 */
57067 __u16 vesa_attributes; /* 0x34 */
57068 __u32 capabilities; /* 0x36 */
57069- __u8 _reserved[6]; /* 0x3a */
57070+ __u16 vesapm_size; /* 0x3a */
57071+ __u8 _reserved[4]; /* 0x3c */
57072 } __attribute__((packed));
57073
57074 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
57075diff -urNp linux-3.0.4/include/linux/security.h linux-3.0.4/include/linux/security.h
57076--- linux-3.0.4/include/linux/security.h 2011-07-21 22:17:23.000000000 -0400
57077+++ linux-3.0.4/include/linux/security.h 2011-08-23 21:48:14.000000000 -0400
57078@@ -36,6 +36,7 @@
57079 #include <linux/key.h>
57080 #include <linux/xfrm.h>
57081 #include <linux/slab.h>
57082+#include <linux/grsecurity.h>
57083 #include <net/flow.h>
57084
57085 /* Maximum number of letters for an LSM name string */
57086diff -urNp linux-3.0.4/include/linux/seq_file.h linux-3.0.4/include/linux/seq_file.h
57087--- linux-3.0.4/include/linux/seq_file.h 2011-07-21 22:17:23.000000000 -0400
57088+++ linux-3.0.4/include/linux/seq_file.h 2011-08-23 21:47:56.000000000 -0400
57089@@ -32,6 +32,7 @@ struct seq_operations {
57090 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
57091 int (*show) (struct seq_file *m, void *v);
57092 };
57093+typedef struct seq_operations __no_const seq_operations_no_const;
57094
57095 #define SEQ_SKIP 1
57096
57097diff -urNp linux-3.0.4/include/linux/shmem_fs.h linux-3.0.4/include/linux/shmem_fs.h
57098--- linux-3.0.4/include/linux/shmem_fs.h 2011-07-21 22:17:23.000000000 -0400
57099+++ linux-3.0.4/include/linux/shmem_fs.h 2011-08-23 21:47:56.000000000 -0400
57100@@ -10,7 +10,7 @@
57101
57102 #define SHMEM_NR_DIRECT 16
57103
57104-#define SHMEM_SYMLINK_INLINE_LEN (SHMEM_NR_DIRECT * sizeof(swp_entry_t))
57105+#define SHMEM_SYMLINK_INLINE_LEN 64
57106
57107 struct shmem_inode_info {
57108 spinlock_t lock;
57109diff -urNp linux-3.0.4/include/linux/shm.h linux-3.0.4/include/linux/shm.h
57110--- linux-3.0.4/include/linux/shm.h 2011-07-21 22:17:23.000000000 -0400
57111+++ linux-3.0.4/include/linux/shm.h 2011-08-23 21:48:14.000000000 -0400
57112@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
57113 pid_t shm_cprid;
57114 pid_t shm_lprid;
57115 struct user_struct *mlock_user;
57116+#ifdef CONFIG_GRKERNSEC
57117+ time_t shm_createtime;
57118+ pid_t shm_lapid;
57119+#endif
57120 };
57121
57122 /* shm_mode upper byte flags */
57123diff -urNp linux-3.0.4/include/linux/skbuff.h linux-3.0.4/include/linux/skbuff.h
57124--- linux-3.0.4/include/linux/skbuff.h 2011-07-21 22:17:23.000000000 -0400
57125+++ linux-3.0.4/include/linux/skbuff.h 2011-08-23 21:47:56.000000000 -0400
57126@@ -592,7 +592,7 @@ static inline struct skb_shared_hwtstamp
57127 */
57128 static inline int skb_queue_empty(const struct sk_buff_head *list)
57129 {
57130- return list->next == (struct sk_buff *)list;
57131+ return list->next == (const struct sk_buff *)list;
57132 }
57133
57134 /**
57135@@ -605,7 +605,7 @@ static inline int skb_queue_empty(const
57136 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
57137 const struct sk_buff *skb)
57138 {
57139- return skb->next == (struct sk_buff *)list;
57140+ return skb->next == (const struct sk_buff *)list;
57141 }
57142
57143 /**
57144@@ -618,7 +618,7 @@ static inline bool skb_queue_is_last(con
57145 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
57146 const struct sk_buff *skb)
57147 {
57148- return skb->prev == (struct sk_buff *)list;
57149+ return skb->prev == (const struct sk_buff *)list;
57150 }
57151
57152 /**
57153@@ -1440,7 +1440,7 @@ static inline int pskb_network_may_pull(
57154 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
57155 */
57156 #ifndef NET_SKB_PAD
57157-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
57158+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
57159 #endif
57160
57161 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
57162diff -urNp linux-3.0.4/include/linux/slab_def.h linux-3.0.4/include/linux/slab_def.h
57163--- linux-3.0.4/include/linux/slab_def.h 2011-07-21 22:17:23.000000000 -0400
57164+++ linux-3.0.4/include/linux/slab_def.h 2011-08-23 21:47:56.000000000 -0400
57165@@ -96,10 +96,10 @@ struct kmem_cache {
57166 unsigned long node_allocs;
57167 unsigned long node_frees;
57168 unsigned long node_overflow;
57169- atomic_t allochit;
57170- atomic_t allocmiss;
57171- atomic_t freehit;
57172- atomic_t freemiss;
57173+ atomic_unchecked_t allochit;
57174+ atomic_unchecked_t allocmiss;
57175+ atomic_unchecked_t freehit;
57176+ atomic_unchecked_t freemiss;
57177
57178 /*
57179 * If debugging is enabled, then the allocator can add additional
57180diff -urNp linux-3.0.4/include/linux/slab.h linux-3.0.4/include/linux/slab.h
57181--- linux-3.0.4/include/linux/slab.h 2011-07-21 22:17:23.000000000 -0400
57182+++ linux-3.0.4/include/linux/slab.h 2011-08-23 21:47:56.000000000 -0400
57183@@ -11,12 +11,20 @@
57184
57185 #include <linux/gfp.h>
57186 #include <linux/types.h>
57187+#include <linux/err.h>
57188
57189 /*
57190 * Flags to pass to kmem_cache_create().
57191 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
57192 */
57193 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
57194+
57195+#ifdef CONFIG_PAX_USERCOPY
57196+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
57197+#else
57198+#define SLAB_USERCOPY 0x00000000UL
57199+#endif
57200+
57201 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
57202 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
57203 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
57204@@ -87,10 +95,13 @@
57205 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
57206 * Both make kfree a no-op.
57207 */
57208-#define ZERO_SIZE_PTR ((void *)16)
57209+#define ZERO_SIZE_PTR \
57210+({ \
57211+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
57212+ (void *)(-MAX_ERRNO-1L); \
57213+})
57214
57215-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
57216- (unsigned long)ZERO_SIZE_PTR)
57217+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
57218
57219 /*
57220 * struct kmem_cache related prototypes
57221@@ -141,6 +152,7 @@ void * __must_check krealloc(const void
57222 void kfree(const void *);
57223 void kzfree(const void *);
57224 size_t ksize(const void *);
57225+void check_object_size(const void *ptr, unsigned long n, bool to);
57226
57227 /*
57228 * Allocator specific definitions. These are mainly used to establish optimized
57229@@ -333,4 +345,59 @@ static inline void *kzalloc_node(size_t
57230
57231 void __init kmem_cache_init_late(void);
57232
57233+#define kmalloc(x, y) \
57234+({ \
57235+ void *___retval; \
57236+ intoverflow_t ___x = (intoverflow_t)x; \
57237+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
57238+ ___retval = NULL; \
57239+ else \
57240+ ___retval = kmalloc((size_t)___x, (y)); \
57241+ ___retval; \
57242+})
57243+
57244+#define kmalloc_node(x, y, z) \
57245+({ \
57246+ void *___retval; \
57247+ intoverflow_t ___x = (intoverflow_t)x; \
57248+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
57249+ ___retval = NULL; \
57250+ else \
57251+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
57252+ ___retval; \
57253+})
57254+
57255+#define kzalloc(x, y) \
57256+({ \
57257+ void *___retval; \
57258+ intoverflow_t ___x = (intoverflow_t)x; \
57259+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
57260+ ___retval = NULL; \
57261+ else \
57262+ ___retval = kzalloc((size_t)___x, (y)); \
57263+ ___retval; \
57264+})
57265+
57266+#define __krealloc(x, y, z) \
57267+({ \
57268+ void *___retval; \
57269+ intoverflow_t ___y = (intoverflow_t)y; \
57270+ if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
57271+ ___retval = NULL; \
57272+ else \
57273+ ___retval = __krealloc((x), (size_t)___y, (z)); \
57274+ ___retval; \
57275+})
57276+
57277+#define krealloc(x, y, z) \
57278+({ \
57279+ void *___retval; \
57280+ intoverflow_t ___y = (intoverflow_t)y; \
57281+ if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
57282+ ___retval = NULL; \
57283+ else \
57284+ ___retval = krealloc((x), (size_t)___y, (z)); \
57285+ ___retval; \
57286+})
57287+
57288 #endif /* _LINUX_SLAB_H */
57289diff -urNp linux-3.0.4/include/linux/slub_def.h linux-3.0.4/include/linux/slub_def.h
57290--- linux-3.0.4/include/linux/slub_def.h 2011-07-21 22:17:23.000000000 -0400
57291+++ linux-3.0.4/include/linux/slub_def.h 2011-08-23 21:47:56.000000000 -0400
57292@@ -82,7 +82,7 @@ struct kmem_cache {
57293 struct kmem_cache_order_objects max;
57294 struct kmem_cache_order_objects min;
57295 gfp_t allocflags; /* gfp flags to use on each alloc */
57296- int refcount; /* Refcount for slab cache destroy */
57297+ atomic_t refcount; /* Refcount for slab cache destroy */
57298 void (*ctor)(void *);
57299 int inuse; /* Offset to metadata */
57300 int align; /* Alignment */
57301@@ -218,7 +218,7 @@ static __always_inline struct kmem_cache
57302 }
57303
57304 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
57305-void *__kmalloc(size_t size, gfp_t flags);
57306+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
57307
57308 static __always_inline void *
57309 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
57310diff -urNp linux-3.0.4/include/linux/sonet.h linux-3.0.4/include/linux/sonet.h
57311--- linux-3.0.4/include/linux/sonet.h 2011-07-21 22:17:23.000000000 -0400
57312+++ linux-3.0.4/include/linux/sonet.h 2011-08-23 21:47:56.000000000 -0400
57313@@ -61,7 +61,7 @@ struct sonet_stats {
57314 #include <asm/atomic.h>
57315
57316 struct k_sonet_stats {
57317-#define __HANDLE_ITEM(i) atomic_t i
57318+#define __HANDLE_ITEM(i) atomic_unchecked_t i
57319 __SONET_ITEMS
57320 #undef __HANDLE_ITEM
57321 };
57322diff -urNp linux-3.0.4/include/linux/sunrpc/clnt.h linux-3.0.4/include/linux/sunrpc/clnt.h
57323--- linux-3.0.4/include/linux/sunrpc/clnt.h 2011-07-21 22:17:23.000000000 -0400
57324+++ linux-3.0.4/include/linux/sunrpc/clnt.h 2011-08-23 21:47:56.000000000 -0400
57325@@ -169,9 +169,9 @@ static inline unsigned short rpc_get_por
57326 {
57327 switch (sap->sa_family) {
57328 case AF_INET:
57329- return ntohs(((struct sockaddr_in *)sap)->sin_port);
57330+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
57331 case AF_INET6:
57332- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
57333+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
57334 }
57335 return 0;
57336 }
57337@@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const
57338 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
57339 const struct sockaddr *src)
57340 {
57341- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
57342+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
57343 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
57344
57345 dsin->sin_family = ssin->sin_family;
57346@@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const
57347 if (sa->sa_family != AF_INET6)
57348 return 0;
57349
57350- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
57351+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
57352 }
57353
57354 #endif /* __KERNEL__ */
57355diff -urNp linux-3.0.4/include/linux/sunrpc/svc_rdma.h linux-3.0.4/include/linux/sunrpc/svc_rdma.h
57356--- linux-3.0.4/include/linux/sunrpc/svc_rdma.h 2011-07-21 22:17:23.000000000 -0400
57357+++ linux-3.0.4/include/linux/sunrpc/svc_rdma.h 2011-08-23 21:47:56.000000000 -0400
57358@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
57359 extern unsigned int svcrdma_max_requests;
57360 extern unsigned int svcrdma_max_req_size;
57361
57362-extern atomic_t rdma_stat_recv;
57363-extern atomic_t rdma_stat_read;
57364-extern atomic_t rdma_stat_write;
57365-extern atomic_t rdma_stat_sq_starve;
57366-extern atomic_t rdma_stat_rq_starve;
57367-extern atomic_t rdma_stat_rq_poll;
57368-extern atomic_t rdma_stat_rq_prod;
57369-extern atomic_t rdma_stat_sq_poll;
57370-extern atomic_t rdma_stat_sq_prod;
57371+extern atomic_unchecked_t rdma_stat_recv;
57372+extern atomic_unchecked_t rdma_stat_read;
57373+extern atomic_unchecked_t rdma_stat_write;
57374+extern atomic_unchecked_t rdma_stat_sq_starve;
57375+extern atomic_unchecked_t rdma_stat_rq_starve;
57376+extern atomic_unchecked_t rdma_stat_rq_poll;
57377+extern atomic_unchecked_t rdma_stat_rq_prod;
57378+extern atomic_unchecked_t rdma_stat_sq_poll;
57379+extern atomic_unchecked_t rdma_stat_sq_prod;
57380
57381 #define RPCRDMA_VERSION 1
57382
57383diff -urNp linux-3.0.4/include/linux/sysctl.h linux-3.0.4/include/linux/sysctl.h
57384--- linux-3.0.4/include/linux/sysctl.h 2011-07-21 22:17:23.000000000 -0400
57385+++ linux-3.0.4/include/linux/sysctl.h 2011-08-23 21:48:14.000000000 -0400
57386@@ -155,7 +155,11 @@ enum
57387 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
57388 };
57389
57390-
57391+#ifdef CONFIG_PAX_SOFTMODE
57392+enum {
57393+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
57394+};
57395+#endif
57396
57397 /* CTL_VM names: */
57398 enum
57399@@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_tab
57400
57401 extern int proc_dostring(struct ctl_table *, int,
57402 void __user *, size_t *, loff_t *);
57403+extern int proc_dostring_modpriv(struct ctl_table *, int,
57404+ void __user *, size_t *, loff_t *);
57405 extern int proc_dointvec(struct ctl_table *, int,
57406 void __user *, size_t *, loff_t *);
57407 extern int proc_dointvec_minmax(struct ctl_table *, int,
57408diff -urNp linux-3.0.4/include/linux/tty_ldisc.h linux-3.0.4/include/linux/tty_ldisc.h
57409--- linux-3.0.4/include/linux/tty_ldisc.h 2011-07-21 22:17:23.000000000 -0400
57410+++ linux-3.0.4/include/linux/tty_ldisc.h 2011-08-23 21:47:56.000000000 -0400
57411@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
57412
57413 struct module *owner;
57414
57415- int refcount;
57416+ atomic_t refcount;
57417 };
57418
57419 struct tty_ldisc {
57420diff -urNp linux-3.0.4/include/linux/types.h linux-3.0.4/include/linux/types.h
57421--- linux-3.0.4/include/linux/types.h 2011-07-21 22:17:23.000000000 -0400
57422+++ linux-3.0.4/include/linux/types.h 2011-08-23 21:47:56.000000000 -0400
57423@@ -213,10 +213,26 @@ typedef struct {
57424 int counter;
57425 } atomic_t;
57426
57427+#ifdef CONFIG_PAX_REFCOUNT
57428+typedef struct {
57429+ int counter;
57430+} atomic_unchecked_t;
57431+#else
57432+typedef atomic_t atomic_unchecked_t;
57433+#endif
57434+
57435 #ifdef CONFIG_64BIT
57436 typedef struct {
57437 long counter;
57438 } atomic64_t;
57439+
57440+#ifdef CONFIG_PAX_REFCOUNT
57441+typedef struct {
57442+ long counter;
57443+} atomic64_unchecked_t;
57444+#else
57445+typedef atomic64_t atomic64_unchecked_t;
57446+#endif
57447 #endif
57448
57449 struct list_head {
57450diff -urNp linux-3.0.4/include/linux/uaccess.h linux-3.0.4/include/linux/uaccess.h
57451--- linux-3.0.4/include/linux/uaccess.h 2011-07-21 22:17:23.000000000 -0400
57452+++ linux-3.0.4/include/linux/uaccess.h 2011-08-23 21:47:56.000000000 -0400
57453@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
57454 long ret; \
57455 mm_segment_t old_fs = get_fs(); \
57456 \
57457- set_fs(KERNEL_DS); \
57458 pagefault_disable(); \
57459+ set_fs(KERNEL_DS); \
57460 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
57461- pagefault_enable(); \
57462 set_fs(old_fs); \
57463+ pagefault_enable(); \
57464 ret; \
57465 })
57466
57467diff -urNp linux-3.0.4/include/linux/unaligned/access_ok.h linux-3.0.4/include/linux/unaligned/access_ok.h
57468--- linux-3.0.4/include/linux/unaligned/access_ok.h 2011-07-21 22:17:23.000000000 -0400
57469+++ linux-3.0.4/include/linux/unaligned/access_ok.h 2011-08-23 21:47:56.000000000 -0400
57470@@ -6,32 +6,32 @@
57471
57472 static inline u16 get_unaligned_le16(const void *p)
57473 {
57474- return le16_to_cpup((__le16 *)p);
57475+ return le16_to_cpup((const __le16 *)p);
57476 }
57477
57478 static inline u32 get_unaligned_le32(const void *p)
57479 {
57480- return le32_to_cpup((__le32 *)p);
57481+ return le32_to_cpup((const __le32 *)p);
57482 }
57483
57484 static inline u64 get_unaligned_le64(const void *p)
57485 {
57486- return le64_to_cpup((__le64 *)p);
57487+ return le64_to_cpup((const __le64 *)p);
57488 }
57489
57490 static inline u16 get_unaligned_be16(const void *p)
57491 {
57492- return be16_to_cpup((__be16 *)p);
57493+ return be16_to_cpup((const __be16 *)p);
57494 }
57495
57496 static inline u32 get_unaligned_be32(const void *p)
57497 {
57498- return be32_to_cpup((__be32 *)p);
57499+ return be32_to_cpup((const __be32 *)p);
57500 }
57501
57502 static inline u64 get_unaligned_be64(const void *p)
57503 {
57504- return be64_to_cpup((__be64 *)p);
57505+ return be64_to_cpup((const __be64 *)p);
57506 }
57507
57508 static inline void put_unaligned_le16(u16 val, void *p)
57509diff -urNp linux-3.0.4/include/linux/vmalloc.h linux-3.0.4/include/linux/vmalloc.h
57510--- linux-3.0.4/include/linux/vmalloc.h 2011-07-21 22:17:23.000000000 -0400
57511+++ linux-3.0.4/include/linux/vmalloc.h 2011-08-23 21:47:56.000000000 -0400
57512@@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
57513 #define VM_MAP 0x00000004 /* vmap()ed pages */
57514 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
57515 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
57516+
57517+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
57518+#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
57519+#endif
57520+
57521 /* bits [20..32] reserved for arch specific ioremap internals */
57522
57523 /*
57524@@ -155,4 +160,103 @@ pcpu_free_vm_areas(struct vm_struct **vm
57525 # endif
57526 #endif
57527
57528+#define vmalloc(x) \
57529+({ \
57530+ void *___retval; \
57531+ intoverflow_t ___x = (intoverflow_t)x; \
57532+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
57533+ ___retval = NULL; \
57534+ else \
57535+ ___retval = vmalloc((unsigned long)___x); \
57536+ ___retval; \
57537+})
57538+
57539+#define vzalloc(x) \
57540+({ \
57541+ void *___retval; \
57542+ intoverflow_t ___x = (intoverflow_t)x; \
57543+ if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
57544+ ___retval = NULL; \
57545+ else \
57546+ ___retval = vzalloc((unsigned long)___x); \
57547+ ___retval; \
57548+})
57549+
57550+#define __vmalloc(x, y, z) \
57551+({ \
57552+ void *___retval; \
57553+ intoverflow_t ___x = (intoverflow_t)x; \
57554+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
57555+ ___retval = NULL; \
57556+ else \
57557+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
57558+ ___retval; \
57559+})
57560+
57561+#define vmalloc_user(x) \
57562+({ \
57563+ void *___retval; \
57564+ intoverflow_t ___x = (intoverflow_t)x; \
57565+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
57566+ ___retval = NULL; \
57567+ else \
57568+ ___retval = vmalloc_user((unsigned long)___x); \
57569+ ___retval; \
57570+})
57571+
57572+#define vmalloc_exec(x) \
57573+({ \
57574+ void *___retval; \
57575+ intoverflow_t ___x = (intoverflow_t)x; \
57576+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
57577+ ___retval = NULL; \
57578+ else \
57579+ ___retval = vmalloc_exec((unsigned long)___x); \
57580+ ___retval; \
57581+})
57582+
57583+#define vmalloc_node(x, y) \
57584+({ \
57585+ void *___retval; \
57586+ intoverflow_t ___x = (intoverflow_t)x; \
57587+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
57588+ ___retval = NULL; \
57589+ else \
57590+ ___retval = vmalloc_node((unsigned long)___x, (y));\
57591+ ___retval; \
57592+})
57593+
57594+#define vzalloc_node(x, y) \
57595+({ \
57596+ void *___retval; \
57597+ intoverflow_t ___x = (intoverflow_t)x; \
57598+ if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
57599+ ___retval = NULL; \
57600+ else \
57601+ ___retval = vzalloc_node((unsigned long)___x, (y));\
57602+ ___retval; \
57603+})
57604+
57605+#define vmalloc_32(x) \
57606+({ \
57607+ void *___retval; \
57608+ intoverflow_t ___x = (intoverflow_t)x; \
57609+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
57610+ ___retval = NULL; \
57611+ else \
57612+ ___retval = vmalloc_32((unsigned long)___x); \
57613+ ___retval; \
57614+})
57615+
57616+#define vmalloc_32_user(x) \
57617+({ \
57618+void *___retval; \
57619+ intoverflow_t ___x = (intoverflow_t)x; \
57620+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
57621+ ___retval = NULL; \
57622+ else \
57623+ ___retval = vmalloc_32_user((unsigned long)___x);\
57624+ ___retval; \
57625+})
57626+
57627 #endif /* _LINUX_VMALLOC_H */
57628diff -urNp linux-3.0.4/include/linux/vmstat.h linux-3.0.4/include/linux/vmstat.h
57629--- linux-3.0.4/include/linux/vmstat.h 2011-07-21 22:17:23.000000000 -0400
57630+++ linux-3.0.4/include/linux/vmstat.h 2011-08-23 21:47:56.000000000 -0400
57631@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(in
57632 /*
57633 * Zone based page accounting with per cpu differentials.
57634 */
57635-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
57636+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
57637
57638 static inline void zone_page_state_add(long x, struct zone *zone,
57639 enum zone_stat_item item)
57640 {
57641- atomic_long_add(x, &zone->vm_stat[item]);
57642- atomic_long_add(x, &vm_stat[item]);
57643+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
57644+ atomic_long_add_unchecked(x, &vm_stat[item]);
57645 }
57646
57647 static inline unsigned long global_page_state(enum zone_stat_item item)
57648 {
57649- long x = atomic_long_read(&vm_stat[item]);
57650+ long x = atomic_long_read_unchecked(&vm_stat[item]);
57651 #ifdef CONFIG_SMP
57652 if (x < 0)
57653 x = 0;
57654@@ -109,7 +109,7 @@ static inline unsigned long global_page_
57655 static inline unsigned long zone_page_state(struct zone *zone,
57656 enum zone_stat_item item)
57657 {
57658- long x = atomic_long_read(&zone->vm_stat[item]);
57659+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
57660 #ifdef CONFIG_SMP
57661 if (x < 0)
57662 x = 0;
57663@@ -126,7 +126,7 @@ static inline unsigned long zone_page_st
57664 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
57665 enum zone_stat_item item)
57666 {
57667- long x = atomic_long_read(&zone->vm_stat[item]);
57668+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
57669
57670 #ifdef CONFIG_SMP
57671 int cpu;
57672@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state
57673
57674 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
57675 {
57676- atomic_long_inc(&zone->vm_stat[item]);
57677- atomic_long_inc(&vm_stat[item]);
57678+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
57679+ atomic_long_inc_unchecked(&vm_stat[item]);
57680 }
57681
57682 static inline void __inc_zone_page_state(struct page *page,
57683@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state
57684
57685 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
57686 {
57687- atomic_long_dec(&zone->vm_stat[item]);
57688- atomic_long_dec(&vm_stat[item]);
57689+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
57690+ atomic_long_dec_unchecked(&vm_stat[item]);
57691 }
57692
57693 static inline void __dec_zone_page_state(struct page *page,
57694diff -urNp linux-3.0.4/include/media/saa7146_vv.h linux-3.0.4/include/media/saa7146_vv.h
57695--- linux-3.0.4/include/media/saa7146_vv.h 2011-07-21 22:17:23.000000000 -0400
57696+++ linux-3.0.4/include/media/saa7146_vv.h 2011-08-24 18:26:09.000000000 -0400
57697@@ -163,7 +163,7 @@ struct saa7146_ext_vv
57698 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
57699
57700 /* the extension can override this */
57701- struct v4l2_ioctl_ops ops;
57702+ v4l2_ioctl_ops_no_const ops;
57703 /* pointer to the saa7146 core ops */
57704 const struct v4l2_ioctl_ops *core_ops;
57705
57706diff -urNp linux-3.0.4/include/media/v4l2-ioctl.h linux-3.0.4/include/media/v4l2-ioctl.h
57707--- linux-3.0.4/include/media/v4l2-ioctl.h 2011-07-21 22:17:23.000000000 -0400
57708+++ linux-3.0.4/include/media/v4l2-ioctl.h 2011-08-24 18:25:45.000000000 -0400
57709@@ -272,6 +272,7 @@ struct v4l2_ioctl_ops {
57710 long (*vidioc_default) (struct file *file, void *fh,
57711 bool valid_prio, int cmd, void *arg);
57712 };
57713+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
57714
57715
57716 /* v4l debugging and diagnostics */
57717diff -urNp linux-3.0.4/include/net/caif/cfctrl.h linux-3.0.4/include/net/caif/cfctrl.h
57718--- linux-3.0.4/include/net/caif/cfctrl.h 2011-07-21 22:17:23.000000000 -0400
57719+++ linux-3.0.4/include/net/caif/cfctrl.h 2011-08-23 21:47:56.000000000 -0400
57720@@ -52,7 +52,7 @@ struct cfctrl_rsp {
57721 void (*radioset_rsp)(void);
57722 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
57723 struct cflayer *client_layer);
57724-};
57725+} __no_const;
57726
57727 /* Link Setup Parameters for CAIF-Links. */
57728 struct cfctrl_link_param {
57729@@ -101,8 +101,8 @@ struct cfctrl_request_info {
57730 struct cfctrl {
57731 struct cfsrvl serv;
57732 struct cfctrl_rsp res;
57733- atomic_t req_seq_no;
57734- atomic_t rsp_seq_no;
57735+ atomic_unchecked_t req_seq_no;
57736+ atomic_unchecked_t rsp_seq_no;
57737 struct list_head list;
57738 /* Protects from simultaneous access to first_req list */
57739 spinlock_t info_list_lock;
57740diff -urNp linux-3.0.4/include/net/flow.h linux-3.0.4/include/net/flow.h
57741--- linux-3.0.4/include/net/flow.h 2011-07-21 22:17:23.000000000 -0400
57742+++ linux-3.0.4/include/net/flow.h 2011-08-23 21:47:56.000000000 -0400
57743@@ -188,6 +188,6 @@ extern struct flow_cache_object *flow_ca
57744 u8 dir, flow_resolve_t resolver, void *ctx);
57745
57746 extern void flow_cache_flush(void);
57747-extern atomic_t flow_cache_genid;
57748+extern atomic_unchecked_t flow_cache_genid;
57749
57750 #endif
57751diff -urNp linux-3.0.4/include/net/inetpeer.h linux-3.0.4/include/net/inetpeer.h
57752--- linux-3.0.4/include/net/inetpeer.h 2011-07-21 22:17:23.000000000 -0400
57753+++ linux-3.0.4/include/net/inetpeer.h 2011-08-23 21:47:56.000000000 -0400
57754@@ -43,8 +43,8 @@ struct inet_peer {
57755 */
57756 union {
57757 struct {
57758- atomic_t rid; /* Frag reception counter */
57759- atomic_t ip_id_count; /* IP ID for the next packet */
57760+ atomic_unchecked_t rid; /* Frag reception counter */
57761+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
57762 __u32 tcp_ts;
57763 __u32 tcp_ts_stamp;
57764 u32 metrics[RTAX_MAX];
57765@@ -108,7 +108,7 @@ static inline __u16 inet_getid(struct in
57766 {
57767 more++;
57768 inet_peer_refcheck(p);
57769- return atomic_add_return(more, &p->ip_id_count) - more;
57770+ return atomic_add_return_unchecked(more, &p->ip_id_count) - more;
57771 }
57772
57773 #endif /* _NET_INETPEER_H */
57774diff -urNp linux-3.0.4/include/net/ip_fib.h linux-3.0.4/include/net/ip_fib.h
57775--- linux-3.0.4/include/net/ip_fib.h 2011-07-21 22:17:23.000000000 -0400
57776+++ linux-3.0.4/include/net/ip_fib.h 2011-08-23 21:47:56.000000000 -0400
57777@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(s
57778
57779 #define FIB_RES_SADDR(net, res) \
57780 ((FIB_RES_NH(res).nh_saddr_genid == \
57781- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
57782+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
57783 FIB_RES_NH(res).nh_saddr : \
57784 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
57785 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
57786diff -urNp linux-3.0.4/include/net/ip_vs.h linux-3.0.4/include/net/ip_vs.h
57787--- linux-3.0.4/include/net/ip_vs.h 2011-07-21 22:17:23.000000000 -0400
57788+++ linux-3.0.4/include/net/ip_vs.h 2011-08-23 21:47:56.000000000 -0400
57789@@ -509,7 +509,7 @@ struct ip_vs_conn {
57790 struct ip_vs_conn *control; /* Master control connection */
57791 atomic_t n_control; /* Number of controlled ones */
57792 struct ip_vs_dest *dest; /* real server */
57793- atomic_t in_pkts; /* incoming packet counter */
57794+ atomic_unchecked_t in_pkts; /* incoming packet counter */
57795
57796 /* packet transmitter for different forwarding methods. If it
57797 mangles the packet, it must return NF_DROP or better NF_STOLEN,
57798@@ -647,7 +647,7 @@ struct ip_vs_dest {
57799 __be16 port; /* port number of the server */
57800 union nf_inet_addr addr; /* IP address of the server */
57801 volatile unsigned flags; /* dest status flags */
57802- atomic_t conn_flags; /* flags to copy to conn */
57803+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
57804 atomic_t weight; /* server weight */
57805
57806 atomic_t refcnt; /* reference counter */
57807diff -urNp linux-3.0.4/include/net/irda/ircomm_core.h linux-3.0.4/include/net/irda/ircomm_core.h
57808--- linux-3.0.4/include/net/irda/ircomm_core.h 2011-07-21 22:17:23.000000000 -0400
57809+++ linux-3.0.4/include/net/irda/ircomm_core.h 2011-08-23 21:47:56.000000000 -0400
57810@@ -51,7 +51,7 @@ typedef struct {
57811 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
57812 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
57813 struct ircomm_info *);
57814-} call_t;
57815+} __no_const call_t;
57816
57817 struct ircomm_cb {
57818 irda_queue_t queue;
57819diff -urNp linux-3.0.4/include/net/irda/ircomm_tty.h linux-3.0.4/include/net/irda/ircomm_tty.h
57820--- linux-3.0.4/include/net/irda/ircomm_tty.h 2011-07-21 22:17:23.000000000 -0400
57821+++ linux-3.0.4/include/net/irda/ircomm_tty.h 2011-08-23 21:47:56.000000000 -0400
57822@@ -35,6 +35,7 @@
57823 #include <linux/termios.h>
57824 #include <linux/timer.h>
57825 #include <linux/tty.h> /* struct tty_struct */
57826+#include <asm/local.h>
57827
57828 #include <net/irda/irias_object.h>
57829 #include <net/irda/ircomm_core.h>
57830@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
57831 unsigned short close_delay;
57832 unsigned short closing_wait; /* time to wait before closing */
57833
57834- int open_count;
57835- int blocked_open; /* # of blocked opens */
57836+ local_t open_count;
57837+ local_t blocked_open; /* # of blocked opens */
57838
57839 /* Protect concurent access to :
57840 * o self->open_count
57841diff -urNp linux-3.0.4/include/net/iucv/af_iucv.h linux-3.0.4/include/net/iucv/af_iucv.h
57842--- linux-3.0.4/include/net/iucv/af_iucv.h 2011-07-21 22:17:23.000000000 -0400
57843+++ linux-3.0.4/include/net/iucv/af_iucv.h 2011-08-23 21:47:56.000000000 -0400
57844@@ -87,7 +87,7 @@ struct iucv_sock {
57845 struct iucv_sock_list {
57846 struct hlist_head head;
57847 rwlock_t lock;
57848- atomic_t autobind_name;
57849+ atomic_unchecked_t autobind_name;
57850 };
57851
57852 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
57853diff -urNp linux-3.0.4/include/net/lapb.h linux-3.0.4/include/net/lapb.h
57854--- linux-3.0.4/include/net/lapb.h 2011-07-21 22:17:23.000000000 -0400
57855+++ linux-3.0.4/include/net/lapb.h 2011-08-23 21:47:56.000000000 -0400
57856@@ -95,7 +95,7 @@ struct lapb_cb {
57857 struct sk_buff_head write_queue;
57858 struct sk_buff_head ack_queue;
57859 unsigned char window;
57860- struct lapb_register_struct callbacks;
57861+ struct lapb_register_struct *callbacks;
57862
57863 /* FRMR control information */
57864 struct lapb_frame frmr_data;
57865diff -urNp linux-3.0.4/include/net/neighbour.h linux-3.0.4/include/net/neighbour.h
57866--- linux-3.0.4/include/net/neighbour.h 2011-07-21 22:17:23.000000000 -0400
57867+++ linux-3.0.4/include/net/neighbour.h 2011-08-31 18:39:25.000000000 -0400
57868@@ -124,7 +124,7 @@ struct neigh_ops {
57869 int (*connected_output)(struct sk_buff*);
57870 int (*hh_output)(struct sk_buff*);
57871 int (*queue_xmit)(struct sk_buff*);
57872-};
57873+} __do_const;
57874
57875 struct pneigh_entry {
57876 struct pneigh_entry *next;
57877diff -urNp linux-3.0.4/include/net/netlink.h linux-3.0.4/include/net/netlink.h
57878--- linux-3.0.4/include/net/netlink.h 2011-07-21 22:17:23.000000000 -0400
57879+++ linux-3.0.4/include/net/netlink.h 2011-08-23 21:47:56.000000000 -0400
57880@@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct
57881 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
57882 {
57883 if (mark)
57884- skb_trim(skb, (unsigned char *) mark - skb->data);
57885+ skb_trim(skb, (const unsigned char *) mark - skb->data);
57886 }
57887
57888 /**
57889diff -urNp linux-3.0.4/include/net/netns/ipv4.h linux-3.0.4/include/net/netns/ipv4.h
57890--- linux-3.0.4/include/net/netns/ipv4.h 2011-07-21 22:17:23.000000000 -0400
57891+++ linux-3.0.4/include/net/netns/ipv4.h 2011-08-23 21:47:56.000000000 -0400
57892@@ -56,8 +56,8 @@ struct netns_ipv4 {
57893
57894 unsigned int sysctl_ping_group_range[2];
57895
57896- atomic_t rt_genid;
57897- atomic_t dev_addr_genid;
57898+ atomic_unchecked_t rt_genid;
57899+ atomic_unchecked_t dev_addr_genid;
57900
57901 #ifdef CONFIG_IP_MROUTE
57902 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
57903diff -urNp linux-3.0.4/include/net/sctp/sctp.h linux-3.0.4/include/net/sctp/sctp.h
57904--- linux-3.0.4/include/net/sctp/sctp.h 2011-07-21 22:17:23.000000000 -0400
57905+++ linux-3.0.4/include/net/sctp/sctp.h 2011-08-23 21:47:56.000000000 -0400
57906@@ -315,9 +315,9 @@ do { \
57907
57908 #else /* SCTP_DEBUG */
57909
57910-#define SCTP_DEBUG_PRINTK(whatever...)
57911-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
57912-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
57913+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
57914+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
57915+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
57916 #define SCTP_ENABLE_DEBUG
57917 #define SCTP_DISABLE_DEBUG
57918 #define SCTP_ASSERT(expr, str, func)
57919diff -urNp linux-3.0.4/include/net/sock.h linux-3.0.4/include/net/sock.h
57920--- linux-3.0.4/include/net/sock.h 2011-07-21 22:17:23.000000000 -0400
57921+++ linux-3.0.4/include/net/sock.h 2011-08-23 21:47:56.000000000 -0400
57922@@ -277,7 +277,7 @@ struct sock {
57923 #ifdef CONFIG_RPS
57924 __u32 sk_rxhash;
57925 #endif
57926- atomic_t sk_drops;
57927+ atomic_unchecked_t sk_drops;
57928 int sk_rcvbuf;
57929
57930 struct sk_filter __rcu *sk_filter;
57931@@ -1390,7 +1390,7 @@ static inline void sk_nocaps_add(struct
57932 }
57933
57934 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
57935- char __user *from, char *to,
57936+ char __user *from, unsigned char *to,
57937 int copy, int offset)
57938 {
57939 if (skb->ip_summed == CHECKSUM_NONE) {
57940diff -urNp linux-3.0.4/include/net/tcp.h linux-3.0.4/include/net/tcp.h
57941--- linux-3.0.4/include/net/tcp.h 2011-07-21 22:17:23.000000000 -0400
57942+++ linux-3.0.4/include/net/tcp.h 2011-08-23 21:47:56.000000000 -0400
57943@@ -1374,8 +1374,8 @@ enum tcp_seq_states {
57944 struct tcp_seq_afinfo {
57945 char *name;
57946 sa_family_t family;
57947- struct file_operations seq_fops;
57948- struct seq_operations seq_ops;
57949+ file_operations_no_const seq_fops;
57950+ seq_operations_no_const seq_ops;
57951 };
57952
57953 struct tcp_iter_state {
57954diff -urNp linux-3.0.4/include/net/udp.h linux-3.0.4/include/net/udp.h
57955--- linux-3.0.4/include/net/udp.h 2011-07-21 22:17:23.000000000 -0400
57956+++ linux-3.0.4/include/net/udp.h 2011-08-23 21:47:56.000000000 -0400
57957@@ -234,8 +234,8 @@ struct udp_seq_afinfo {
57958 char *name;
57959 sa_family_t family;
57960 struct udp_table *udp_table;
57961- struct file_operations seq_fops;
57962- struct seq_operations seq_ops;
57963+ file_operations_no_const seq_fops;
57964+ seq_operations_no_const seq_ops;
57965 };
57966
57967 struct udp_iter_state {
57968diff -urNp linux-3.0.4/include/net/xfrm.h linux-3.0.4/include/net/xfrm.h
57969--- linux-3.0.4/include/net/xfrm.h 2011-07-21 22:17:23.000000000 -0400
57970+++ linux-3.0.4/include/net/xfrm.h 2011-08-23 21:47:56.000000000 -0400
57971@@ -505,7 +505,7 @@ struct xfrm_policy {
57972 struct timer_list timer;
57973
57974 struct flow_cache_object flo;
57975- atomic_t genid;
57976+ atomic_unchecked_t genid;
57977 u32 priority;
57978 u32 index;
57979 struct xfrm_mark mark;
57980diff -urNp linux-3.0.4/include/rdma/iw_cm.h linux-3.0.4/include/rdma/iw_cm.h
57981--- linux-3.0.4/include/rdma/iw_cm.h 2011-07-21 22:17:23.000000000 -0400
57982+++ linux-3.0.4/include/rdma/iw_cm.h 2011-08-23 21:47:56.000000000 -0400
57983@@ -120,7 +120,7 @@ struct iw_cm_verbs {
57984 int backlog);
57985
57986 int (*destroy_listen)(struct iw_cm_id *cm_id);
57987-};
57988+} __no_const;
57989
57990 /**
57991 * iw_create_cm_id - Create an IW CM identifier.
57992diff -urNp linux-3.0.4/include/scsi/libfc.h linux-3.0.4/include/scsi/libfc.h
57993--- linux-3.0.4/include/scsi/libfc.h 2011-07-21 22:17:23.000000000 -0400
57994+++ linux-3.0.4/include/scsi/libfc.h 2011-08-23 21:47:56.000000000 -0400
57995@@ -750,6 +750,7 @@ struct libfc_function_template {
57996 */
57997 void (*disc_stop_final) (struct fc_lport *);
57998 };
57999+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
58000
58001 /**
58002 * struct fc_disc - Discovery context
58003@@ -853,7 +854,7 @@ struct fc_lport {
58004 struct fc_vport *vport;
58005
58006 /* Operational Information */
58007- struct libfc_function_template tt;
58008+ libfc_function_template_no_const tt;
58009 u8 link_up;
58010 u8 qfull;
58011 enum fc_lport_state state;
58012diff -urNp linux-3.0.4/include/scsi/scsi_device.h linux-3.0.4/include/scsi/scsi_device.h
58013--- linux-3.0.4/include/scsi/scsi_device.h 2011-07-21 22:17:23.000000000 -0400
58014+++ linux-3.0.4/include/scsi/scsi_device.h 2011-08-23 21:47:56.000000000 -0400
58015@@ -161,9 +161,9 @@ struct scsi_device {
58016 unsigned int max_device_blocked; /* what device_blocked counts down from */
58017 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
58018
58019- atomic_t iorequest_cnt;
58020- atomic_t iodone_cnt;
58021- atomic_t ioerr_cnt;
58022+ atomic_unchecked_t iorequest_cnt;
58023+ atomic_unchecked_t iodone_cnt;
58024+ atomic_unchecked_t ioerr_cnt;
58025
58026 struct device sdev_gendev,
58027 sdev_dev;
58028diff -urNp linux-3.0.4/include/scsi/scsi_transport_fc.h linux-3.0.4/include/scsi/scsi_transport_fc.h
58029--- linux-3.0.4/include/scsi/scsi_transport_fc.h 2011-07-21 22:17:23.000000000 -0400
58030+++ linux-3.0.4/include/scsi/scsi_transport_fc.h 2011-08-26 19:49:56.000000000 -0400
58031@@ -711,7 +711,7 @@ struct fc_function_template {
58032 unsigned long show_host_system_hostname:1;
58033
58034 unsigned long disable_target_scan:1;
58035-};
58036+} __do_const;
58037
58038
58039 /**
58040diff -urNp linux-3.0.4/include/sound/ak4xxx-adda.h linux-3.0.4/include/sound/ak4xxx-adda.h
58041--- linux-3.0.4/include/sound/ak4xxx-adda.h 2011-07-21 22:17:23.000000000 -0400
58042+++ linux-3.0.4/include/sound/ak4xxx-adda.h 2011-08-23 21:47:56.000000000 -0400
58043@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
58044 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
58045 unsigned char val);
58046 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
58047-};
58048+} __no_const;
58049
58050 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
58051
58052diff -urNp linux-3.0.4/include/sound/hwdep.h linux-3.0.4/include/sound/hwdep.h
58053--- linux-3.0.4/include/sound/hwdep.h 2011-07-21 22:17:23.000000000 -0400
58054+++ linux-3.0.4/include/sound/hwdep.h 2011-08-23 21:47:56.000000000 -0400
58055@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
58056 struct snd_hwdep_dsp_status *status);
58057 int (*dsp_load)(struct snd_hwdep *hw,
58058 struct snd_hwdep_dsp_image *image);
58059-};
58060+} __no_const;
58061
58062 struct snd_hwdep {
58063 struct snd_card *card;
58064diff -urNp linux-3.0.4/include/sound/info.h linux-3.0.4/include/sound/info.h
58065--- linux-3.0.4/include/sound/info.h 2011-07-21 22:17:23.000000000 -0400
58066+++ linux-3.0.4/include/sound/info.h 2011-08-23 21:47:56.000000000 -0400
58067@@ -44,7 +44,7 @@ struct snd_info_entry_text {
58068 struct snd_info_buffer *buffer);
58069 void (*write)(struct snd_info_entry *entry,
58070 struct snd_info_buffer *buffer);
58071-};
58072+} __no_const;
58073
58074 struct snd_info_entry_ops {
58075 int (*open)(struct snd_info_entry *entry,
58076diff -urNp linux-3.0.4/include/sound/pcm.h linux-3.0.4/include/sound/pcm.h
58077--- linux-3.0.4/include/sound/pcm.h 2011-07-21 22:17:23.000000000 -0400
58078+++ linux-3.0.4/include/sound/pcm.h 2011-08-23 21:47:56.000000000 -0400
58079@@ -81,6 +81,7 @@ struct snd_pcm_ops {
58080 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
58081 int (*ack)(struct snd_pcm_substream *substream);
58082 };
58083+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
58084
58085 /*
58086 *
58087diff -urNp linux-3.0.4/include/sound/sb16_csp.h linux-3.0.4/include/sound/sb16_csp.h
58088--- linux-3.0.4/include/sound/sb16_csp.h 2011-07-21 22:17:23.000000000 -0400
58089+++ linux-3.0.4/include/sound/sb16_csp.h 2011-08-23 21:47:56.000000000 -0400
58090@@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
58091 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
58092 int (*csp_stop) (struct snd_sb_csp * p);
58093 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
58094-};
58095+} __no_const;
58096
58097 /*
58098 * CSP private data
58099diff -urNp linux-3.0.4/include/sound/soc.h linux-3.0.4/include/sound/soc.h
58100--- linux-3.0.4/include/sound/soc.h 2011-07-21 22:17:23.000000000 -0400
58101+++ linux-3.0.4/include/sound/soc.h 2011-08-26 19:49:56.000000000 -0400
58102@@ -636,7 +636,7 @@ struct snd_soc_platform_driver {
58103
58104 /* platform stream ops */
58105 struct snd_pcm_ops *ops;
58106-};
58107+} __do_const;
58108
58109 struct snd_soc_platform {
58110 const char *name;
58111diff -urNp linux-3.0.4/include/sound/ymfpci.h linux-3.0.4/include/sound/ymfpci.h
58112--- linux-3.0.4/include/sound/ymfpci.h 2011-07-21 22:17:23.000000000 -0400
58113+++ linux-3.0.4/include/sound/ymfpci.h 2011-08-23 21:47:56.000000000 -0400
58114@@ -358,7 +358,7 @@ struct snd_ymfpci {
58115 spinlock_t reg_lock;
58116 spinlock_t voice_lock;
58117 wait_queue_head_t interrupt_sleep;
58118- atomic_t interrupt_sleep_count;
58119+ atomic_unchecked_t interrupt_sleep_count;
58120 struct snd_info_entry *proc_entry;
58121 const struct firmware *dsp_microcode;
58122 const struct firmware *controller_microcode;
58123diff -urNp linux-3.0.4/include/target/target_core_base.h linux-3.0.4/include/target/target_core_base.h
58124--- linux-3.0.4/include/target/target_core_base.h 2011-07-21 22:17:23.000000000 -0400
58125+++ linux-3.0.4/include/target/target_core_base.h 2011-08-23 21:47:56.000000000 -0400
58126@@ -364,7 +364,7 @@ struct t10_reservation_ops {
58127 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
58128 int (*t10_pr_register)(struct se_cmd *);
58129 int (*t10_pr_clear)(struct se_cmd *);
58130-};
58131+} __no_const;
58132
58133 struct t10_reservation_template {
58134 /* Reservation effects all target ports */
58135@@ -432,8 +432,8 @@ struct se_transport_task {
58136 atomic_t t_task_cdbs_left;
58137 atomic_t t_task_cdbs_ex_left;
58138 atomic_t t_task_cdbs_timeout_left;
58139- atomic_t t_task_cdbs_sent;
58140- atomic_t t_transport_aborted;
58141+ atomic_unchecked_t t_task_cdbs_sent;
58142+ atomic_unchecked_t t_transport_aborted;
58143 atomic_t t_transport_active;
58144 atomic_t t_transport_complete;
58145 atomic_t t_transport_queue_active;
58146@@ -774,7 +774,7 @@ struct se_device {
58147 atomic_t active_cmds;
58148 atomic_t simple_cmds;
58149 atomic_t depth_left;
58150- atomic_t dev_ordered_id;
58151+ atomic_unchecked_t dev_ordered_id;
58152 atomic_t dev_tur_active;
58153 atomic_t execute_tasks;
58154 atomic_t dev_status_thr_count;
58155diff -urNp linux-3.0.4/include/trace/events/irq.h linux-3.0.4/include/trace/events/irq.h
58156--- linux-3.0.4/include/trace/events/irq.h 2011-07-21 22:17:23.000000000 -0400
58157+++ linux-3.0.4/include/trace/events/irq.h 2011-08-23 21:47:56.000000000 -0400
58158@@ -36,7 +36,7 @@ struct softirq_action;
58159 */
58160 TRACE_EVENT(irq_handler_entry,
58161
58162- TP_PROTO(int irq, struct irqaction *action),
58163+ TP_PROTO(int irq, const struct irqaction *action),
58164
58165 TP_ARGS(irq, action),
58166
58167@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
58168 */
58169 TRACE_EVENT(irq_handler_exit,
58170
58171- TP_PROTO(int irq, struct irqaction *action, int ret),
58172+ TP_PROTO(int irq, const struct irqaction *action, int ret),
58173
58174 TP_ARGS(irq, action, ret),
58175
58176diff -urNp linux-3.0.4/include/video/udlfb.h linux-3.0.4/include/video/udlfb.h
58177--- linux-3.0.4/include/video/udlfb.h 2011-07-21 22:17:23.000000000 -0400
58178+++ linux-3.0.4/include/video/udlfb.h 2011-08-23 21:47:56.000000000 -0400
58179@@ -51,10 +51,10 @@ struct dlfb_data {
58180 int base8;
58181 u32 pseudo_palette[256];
58182 /* blit-only rendering path metrics, exposed through sysfs */
58183- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
58184- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
58185- atomic_t bytes_sent; /* to usb, after compression including overhead */
58186- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
58187+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
58188+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
58189+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
58190+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
58191 };
58192
58193 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
58194diff -urNp linux-3.0.4/include/video/uvesafb.h linux-3.0.4/include/video/uvesafb.h
58195--- linux-3.0.4/include/video/uvesafb.h 2011-07-21 22:17:23.000000000 -0400
58196+++ linux-3.0.4/include/video/uvesafb.h 2011-08-23 21:47:56.000000000 -0400
58197@@ -177,6 +177,7 @@ struct uvesafb_par {
58198 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
58199 u8 pmi_setpal; /* PMI for palette changes */
58200 u16 *pmi_base; /* protected mode interface location */
58201+ u8 *pmi_code; /* protected mode code location */
58202 void *pmi_start;
58203 void *pmi_pal;
58204 u8 *vbe_state_orig; /*
58205diff -urNp linux-3.0.4/init/do_mounts.c linux-3.0.4/init/do_mounts.c
58206--- linux-3.0.4/init/do_mounts.c 2011-07-21 22:17:23.000000000 -0400
58207+++ linux-3.0.4/init/do_mounts.c 2011-08-23 21:47:56.000000000 -0400
58208@@ -287,7 +287,7 @@ static void __init get_fs_names(char *pa
58209
58210 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
58211 {
58212- int err = sys_mount(name, "/root", fs, flags, data);
58213+ int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
58214 if (err)
58215 return err;
58216
58217@@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...
58218 va_start(args, fmt);
58219 vsprintf(buf, fmt, args);
58220 va_end(args);
58221- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
58222+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
58223 if (fd >= 0) {
58224 sys_ioctl(fd, FDEJECT, 0);
58225 sys_close(fd);
58226 }
58227 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
58228- fd = sys_open("/dev/console", O_RDWR, 0);
58229+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
58230 if (fd >= 0) {
58231 sys_ioctl(fd, TCGETS, (long)&termios);
58232 termios.c_lflag &= ~ICANON;
58233 sys_ioctl(fd, TCSETSF, (long)&termios);
58234- sys_read(fd, &c, 1);
58235+ sys_read(fd, (char __user *)&c, 1);
58236 termios.c_lflag |= ICANON;
58237 sys_ioctl(fd, TCSETSF, (long)&termios);
58238 sys_close(fd);
58239@@ -488,6 +488,6 @@ void __init prepare_namespace(void)
58240 mount_root();
58241 out:
58242 devtmpfs_mount("dev");
58243- sys_mount(".", "/", NULL, MS_MOVE, NULL);
58244+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
58245 sys_chroot((const char __user __force *)".");
58246 }
58247diff -urNp linux-3.0.4/init/do_mounts.h linux-3.0.4/init/do_mounts.h
58248--- linux-3.0.4/init/do_mounts.h 2011-07-21 22:17:23.000000000 -0400
58249+++ linux-3.0.4/init/do_mounts.h 2011-08-23 21:47:56.000000000 -0400
58250@@ -15,15 +15,15 @@ extern int root_mountflags;
58251
58252 static inline int create_dev(char *name, dev_t dev)
58253 {
58254- sys_unlink(name);
58255- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
58256+ sys_unlink((__force char __user *)name);
58257+ return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
58258 }
58259
58260 #if BITS_PER_LONG == 32
58261 static inline u32 bstat(char *name)
58262 {
58263 struct stat64 stat;
58264- if (sys_stat64(name, &stat) != 0)
58265+ if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
58266 return 0;
58267 if (!S_ISBLK(stat.st_mode))
58268 return 0;
58269diff -urNp linux-3.0.4/init/do_mounts_initrd.c linux-3.0.4/init/do_mounts_initrd.c
58270--- linux-3.0.4/init/do_mounts_initrd.c 2011-07-21 22:17:23.000000000 -0400
58271+++ linux-3.0.4/init/do_mounts_initrd.c 2011-08-23 21:47:56.000000000 -0400
58272@@ -44,13 +44,13 @@ static void __init handle_initrd(void)
58273 create_dev("/dev/root.old", Root_RAM0);
58274 /* mount initrd on rootfs' /root */
58275 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
58276- sys_mkdir("/old", 0700);
58277- root_fd = sys_open("/", 0, 0);
58278- old_fd = sys_open("/old", 0, 0);
58279+ sys_mkdir((__force const char __user *)"/old", 0700);
58280+ root_fd = sys_open((__force const char __user *)"/", 0, 0);
58281+ old_fd = sys_open((__force const char __user *)"/old", 0, 0);
58282 /* move initrd over / and chdir/chroot in initrd root */
58283- sys_chdir("/root");
58284- sys_mount(".", "/", NULL, MS_MOVE, NULL);
58285- sys_chroot(".");
58286+ sys_chdir((__force const char __user *)"/root");
58287+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
58288+ sys_chroot((__force const char __user *)".");
58289
58290 /*
58291 * In case that a resume from disk is carried out by linuxrc or one of
58292@@ -67,15 +67,15 @@ static void __init handle_initrd(void)
58293
58294 /* move initrd to rootfs' /old */
58295 sys_fchdir(old_fd);
58296- sys_mount("/", ".", NULL, MS_MOVE, NULL);
58297+ sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
58298 /* switch root and cwd back to / of rootfs */
58299 sys_fchdir(root_fd);
58300- sys_chroot(".");
58301+ sys_chroot((__force const char __user *)".");
58302 sys_close(old_fd);
58303 sys_close(root_fd);
58304
58305 if (new_decode_dev(real_root_dev) == Root_RAM0) {
58306- sys_chdir("/old");
58307+ sys_chdir((__force const char __user *)"/old");
58308 return;
58309 }
58310
58311@@ -83,17 +83,17 @@ static void __init handle_initrd(void)
58312 mount_root();
58313
58314 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
58315- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
58316+ error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
58317 if (!error)
58318 printk("okay\n");
58319 else {
58320- int fd = sys_open("/dev/root.old", O_RDWR, 0);
58321+ int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
58322 if (error == -ENOENT)
58323 printk("/initrd does not exist. Ignored.\n");
58324 else
58325 printk("failed\n");
58326 printk(KERN_NOTICE "Unmounting old root\n");
58327- sys_umount("/old", MNT_DETACH);
58328+ sys_umount((__force char __user *)"/old", MNT_DETACH);
58329 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
58330 if (fd < 0) {
58331 error = fd;
58332@@ -116,11 +116,11 @@ int __init initrd_load(void)
58333 * mounted in the normal path.
58334 */
58335 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
58336- sys_unlink("/initrd.image");
58337+ sys_unlink((__force const char __user *)"/initrd.image");
58338 handle_initrd();
58339 return 1;
58340 }
58341 }
58342- sys_unlink("/initrd.image");
58343+ sys_unlink((__force const char __user *)"/initrd.image");
58344 return 0;
58345 }
58346diff -urNp linux-3.0.4/init/do_mounts_md.c linux-3.0.4/init/do_mounts_md.c
58347--- linux-3.0.4/init/do_mounts_md.c 2011-07-21 22:17:23.000000000 -0400
58348+++ linux-3.0.4/init/do_mounts_md.c 2011-08-23 21:47:56.000000000 -0400
58349@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
58350 partitioned ? "_d" : "", minor,
58351 md_setup_args[ent].device_names);
58352
58353- fd = sys_open(name, 0, 0);
58354+ fd = sys_open((__force char __user *)name, 0, 0);
58355 if (fd < 0) {
58356 printk(KERN_ERR "md: open failed - cannot start "
58357 "array %s\n", name);
58358@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
58359 * array without it
58360 */
58361 sys_close(fd);
58362- fd = sys_open(name, 0, 0);
58363+ fd = sys_open((__force char __user *)name, 0, 0);
58364 sys_ioctl(fd, BLKRRPART, 0);
58365 }
58366 sys_close(fd);
58367diff -urNp linux-3.0.4/init/initramfs.c linux-3.0.4/init/initramfs.c
58368--- linux-3.0.4/init/initramfs.c 2011-07-21 22:17:23.000000000 -0400
58369+++ linux-3.0.4/init/initramfs.c 2011-08-23 21:47:56.000000000 -0400
58370@@ -74,7 +74,7 @@ static void __init free_hash(void)
58371 }
58372 }
58373
58374-static long __init do_utime(char __user *filename, time_t mtime)
58375+static long __init do_utime(__force char __user *filename, time_t mtime)
58376 {
58377 struct timespec t[2];
58378
58379@@ -109,7 +109,7 @@ static void __init dir_utime(void)
58380 struct dir_entry *de, *tmp;
58381 list_for_each_entry_safe(de, tmp, &dir_list, list) {
58382 list_del(&de->list);
58383- do_utime(de->name, de->mtime);
58384+ do_utime((__force char __user *)de->name, de->mtime);
58385 kfree(de->name);
58386 kfree(de);
58387 }
58388@@ -271,7 +271,7 @@ static int __init maybe_link(void)
58389 if (nlink >= 2) {
58390 char *old = find_link(major, minor, ino, mode, collected);
58391 if (old)
58392- return (sys_link(old, collected) < 0) ? -1 : 1;
58393+ return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
58394 }
58395 return 0;
58396 }
58397@@ -280,11 +280,11 @@ static void __init clean_path(char *path
58398 {
58399 struct stat st;
58400
58401- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
58402+ if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
58403 if (S_ISDIR(st.st_mode))
58404- sys_rmdir(path);
58405+ sys_rmdir((__force char __user *)path);
58406 else
58407- sys_unlink(path);
58408+ sys_unlink((__force char __user *)path);
58409 }
58410 }
58411
58412@@ -305,7 +305,7 @@ static int __init do_name(void)
58413 int openflags = O_WRONLY|O_CREAT;
58414 if (ml != 1)
58415 openflags |= O_TRUNC;
58416- wfd = sys_open(collected, openflags, mode);
58417+ wfd = sys_open((__force char __user *)collected, openflags, mode);
58418
58419 if (wfd >= 0) {
58420 sys_fchown(wfd, uid, gid);
58421@@ -317,17 +317,17 @@ static int __init do_name(void)
58422 }
58423 }
58424 } else if (S_ISDIR(mode)) {
58425- sys_mkdir(collected, mode);
58426- sys_chown(collected, uid, gid);
58427- sys_chmod(collected, mode);
58428+ sys_mkdir((__force char __user *)collected, mode);
58429+ sys_chown((__force char __user *)collected, uid, gid);
58430+ sys_chmod((__force char __user *)collected, mode);
58431 dir_add(collected, mtime);
58432 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
58433 S_ISFIFO(mode) || S_ISSOCK(mode)) {
58434 if (maybe_link() == 0) {
58435- sys_mknod(collected, mode, rdev);
58436- sys_chown(collected, uid, gid);
58437- sys_chmod(collected, mode);
58438- do_utime(collected, mtime);
58439+ sys_mknod((__force char __user *)collected, mode, rdev);
58440+ sys_chown((__force char __user *)collected, uid, gid);
58441+ sys_chmod((__force char __user *)collected, mode);
58442+ do_utime((__force char __user *)collected, mtime);
58443 }
58444 }
58445 return 0;
58446@@ -336,15 +336,15 @@ static int __init do_name(void)
58447 static int __init do_copy(void)
58448 {
58449 if (count >= body_len) {
58450- sys_write(wfd, victim, body_len);
58451+ sys_write(wfd, (__force char __user *)victim, body_len);
58452 sys_close(wfd);
58453- do_utime(vcollected, mtime);
58454+ do_utime((__force char __user *)vcollected, mtime);
58455 kfree(vcollected);
58456 eat(body_len);
58457 state = SkipIt;
58458 return 0;
58459 } else {
58460- sys_write(wfd, victim, count);
58461+ sys_write(wfd, (__force char __user *)victim, count);
58462 body_len -= count;
58463 eat(count);
58464 return 1;
58465@@ -355,9 +355,9 @@ static int __init do_symlink(void)
58466 {
58467 collected[N_ALIGN(name_len) + body_len] = '\0';
58468 clean_path(collected, 0);
58469- sys_symlink(collected + N_ALIGN(name_len), collected);
58470- sys_lchown(collected, uid, gid);
58471- do_utime(collected, mtime);
58472+ sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
58473+ sys_lchown((__force char __user *)collected, uid, gid);
58474+ do_utime((__force char __user *)collected, mtime);
58475 state = SkipIt;
58476 next_state = Reset;
58477 return 0;
58478diff -urNp linux-3.0.4/init/Kconfig linux-3.0.4/init/Kconfig
58479--- linux-3.0.4/init/Kconfig 2011-07-21 22:17:23.000000000 -0400
58480+++ linux-3.0.4/init/Kconfig 2011-08-23 21:47:56.000000000 -0400
58481@@ -1195,7 +1195,7 @@ config SLUB_DEBUG
58482
58483 config COMPAT_BRK
58484 bool "Disable heap randomization"
58485- default y
58486+ default n
58487 help
58488 Randomizing heap placement makes heap exploits harder, but it
58489 also breaks ancient binaries (including anything libc5 based).
58490diff -urNp linux-3.0.4/init/main.c linux-3.0.4/init/main.c
58491--- linux-3.0.4/init/main.c 2011-07-21 22:17:23.000000000 -0400
58492+++ linux-3.0.4/init/main.c 2011-08-23 21:48:14.000000000 -0400
58493@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void)
58494 extern void tc_init(void);
58495 #endif
58496
58497+extern void grsecurity_init(void);
58498+
58499 /*
58500 * Debug helper: via this flag we know that we are in 'early bootup code'
58501 * where only the boot processor is running with IRQ disabled. This means
58502@@ -149,6 +151,49 @@ static int __init set_reset_devices(char
58503
58504 __setup("reset_devices", set_reset_devices);
58505
58506+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
58507+extern char pax_enter_kernel_user[];
58508+extern char pax_exit_kernel_user[];
58509+extern pgdval_t clone_pgd_mask;
58510+#endif
58511+
58512+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
58513+static int __init setup_pax_nouderef(char *str)
58514+{
58515+#ifdef CONFIG_X86_32
58516+ unsigned int cpu;
58517+ struct desc_struct *gdt;
58518+
58519+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
58520+ gdt = get_cpu_gdt_table(cpu);
58521+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
58522+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
58523+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
58524+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
58525+ }
58526+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
58527+#else
58528+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
58529+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
58530+ clone_pgd_mask = ~(pgdval_t)0UL;
58531+#endif
58532+
58533+ return 0;
58534+}
58535+early_param("pax_nouderef", setup_pax_nouderef);
58536+#endif
58537+
58538+#ifdef CONFIG_PAX_SOFTMODE
58539+int pax_softmode;
58540+
58541+static int __init setup_pax_softmode(char *str)
58542+{
58543+ get_option(&str, &pax_softmode);
58544+ return 1;
58545+}
58546+__setup("pax_softmode=", setup_pax_softmode);
58547+#endif
58548+
58549 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
58550 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
58551 static const char *panic_later, *panic_param;
58552@@ -667,6 +712,7 @@ int __init_or_module do_one_initcall(ini
58553 {
58554 int count = preempt_count();
58555 int ret;
58556+ const char *msg1 = "", *msg2 = "";
58557
58558 if (initcall_debug)
58559 ret = do_one_initcall_debug(fn);
58560@@ -679,15 +725,15 @@ int __init_or_module do_one_initcall(ini
58561 sprintf(msgbuf, "error code %d ", ret);
58562
58563 if (preempt_count() != count) {
58564- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
58565+ msg1 = " preemption imbalance";
58566 preempt_count() = count;
58567 }
58568 if (irqs_disabled()) {
58569- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
58570+ msg2 = " disabled interrupts";
58571 local_irq_enable();
58572 }
58573- if (msgbuf[0]) {
58574- printk("initcall %pF returned with %s\n", fn, msgbuf);
58575+ if (msgbuf[0] || *msg1 || *msg2) {
58576+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
58577 }
58578
58579 return ret;
58580@@ -805,7 +851,7 @@ static int __init kernel_init(void * unu
58581 do_basic_setup();
58582
58583 /* Open the /dev/console on the rootfs, this should never fail */
58584- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
58585+ if (sys_open((__force const char __user *) "/dev/console", O_RDWR, 0) < 0)
58586 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
58587
58588 (void) sys_dup(0);
58589@@ -818,11 +864,13 @@ static int __init kernel_init(void * unu
58590 if (!ramdisk_execute_command)
58591 ramdisk_execute_command = "/init";
58592
58593- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
58594+ if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
58595 ramdisk_execute_command = NULL;
58596 prepare_namespace();
58597 }
58598
58599+ grsecurity_init();
58600+
58601 /*
58602 * Ok, we have completed the initial bootup, and
58603 * we're essentially up and running. Get rid of the
58604diff -urNp linux-3.0.4/ipc/mqueue.c linux-3.0.4/ipc/mqueue.c
58605--- linux-3.0.4/ipc/mqueue.c 2011-07-21 22:17:23.000000000 -0400
58606+++ linux-3.0.4/ipc/mqueue.c 2011-08-23 21:48:14.000000000 -0400
58607@@ -154,6 +154,7 @@ static struct inode *mqueue_get_inode(st
58608 mq_bytes = (mq_msg_tblsz +
58609 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
58610
58611+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
58612 spin_lock(&mq_lock);
58613 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
58614 u->mq_bytes + mq_bytes >
58615diff -urNp linux-3.0.4/ipc/msg.c linux-3.0.4/ipc/msg.c
58616--- linux-3.0.4/ipc/msg.c 2011-07-21 22:17:23.000000000 -0400
58617+++ linux-3.0.4/ipc/msg.c 2011-08-23 21:47:56.000000000 -0400
58618@@ -309,18 +309,19 @@ static inline int msg_security(struct ke
58619 return security_msg_queue_associate(msq, msgflg);
58620 }
58621
58622+static struct ipc_ops msg_ops = {
58623+ .getnew = newque,
58624+ .associate = msg_security,
58625+ .more_checks = NULL
58626+};
58627+
58628 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
58629 {
58630 struct ipc_namespace *ns;
58631- struct ipc_ops msg_ops;
58632 struct ipc_params msg_params;
58633
58634 ns = current->nsproxy->ipc_ns;
58635
58636- msg_ops.getnew = newque;
58637- msg_ops.associate = msg_security;
58638- msg_ops.more_checks = NULL;
58639-
58640 msg_params.key = key;
58641 msg_params.flg = msgflg;
58642
58643diff -urNp linux-3.0.4/ipc/sem.c linux-3.0.4/ipc/sem.c
58644--- linux-3.0.4/ipc/sem.c 2011-09-02 18:11:21.000000000 -0400
58645+++ linux-3.0.4/ipc/sem.c 2011-08-23 21:48:14.000000000 -0400
58646@@ -318,10 +318,15 @@ static inline int sem_more_checks(struct
58647 return 0;
58648 }
58649
58650+static struct ipc_ops sem_ops = {
58651+ .getnew = newary,
58652+ .associate = sem_security,
58653+ .more_checks = sem_more_checks
58654+};
58655+
58656 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
58657 {
58658 struct ipc_namespace *ns;
58659- struct ipc_ops sem_ops;
58660 struct ipc_params sem_params;
58661
58662 ns = current->nsproxy->ipc_ns;
58663@@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
58664 if (nsems < 0 || nsems > ns->sc_semmsl)
58665 return -EINVAL;
58666
58667- sem_ops.getnew = newary;
58668- sem_ops.associate = sem_security;
58669- sem_ops.more_checks = sem_more_checks;
58670-
58671 sem_params.key = key;
58672 sem_params.flg = semflg;
58673 sem_params.u.nsems = nsems;
58674@@ -854,6 +855,8 @@ static int semctl_main(struct ipc_namesp
58675 int nsems;
58676 struct list_head tasks;
58677
58678+ pax_track_stack();
58679+
58680 sma = sem_lock_check(ns, semid);
58681 if (IS_ERR(sma))
58682 return PTR_ERR(sma);
58683@@ -1301,6 +1304,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
58684 struct ipc_namespace *ns;
58685 struct list_head tasks;
58686
58687+ pax_track_stack();
58688+
58689 ns = current->nsproxy->ipc_ns;
58690
58691 if (nsops < 1 || semid < 0)
58692diff -urNp linux-3.0.4/ipc/shm.c linux-3.0.4/ipc/shm.c
58693--- linux-3.0.4/ipc/shm.c 2011-07-21 22:17:23.000000000 -0400
58694+++ linux-3.0.4/ipc/shm.c 2011-08-23 21:48:14.000000000 -0400
58695@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name
58696 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
58697 #endif
58698
58699+#ifdef CONFIG_GRKERNSEC
58700+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
58701+ const time_t shm_createtime, const uid_t cuid,
58702+ const int shmid);
58703+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
58704+ const time_t shm_createtime);
58705+#endif
58706+
58707 void shm_init_ns(struct ipc_namespace *ns)
58708 {
58709 ns->shm_ctlmax = SHMMAX;
58710@@ -401,6 +409,14 @@ static int newseg(struct ipc_namespace *
58711 shp->shm_lprid = 0;
58712 shp->shm_atim = shp->shm_dtim = 0;
58713 shp->shm_ctim = get_seconds();
58714+#ifdef CONFIG_GRKERNSEC
58715+ {
58716+ struct timespec timeval;
58717+ do_posix_clock_monotonic_gettime(&timeval);
58718+
58719+ shp->shm_createtime = timeval.tv_sec;
58720+ }
58721+#endif
58722 shp->shm_segsz = size;
58723 shp->shm_nattch = 0;
58724 shp->shm_file = file;
58725@@ -451,18 +467,19 @@ static inline int shm_more_checks(struct
58726 return 0;
58727 }
58728
58729+static struct ipc_ops shm_ops = {
58730+ .getnew = newseg,
58731+ .associate = shm_security,
58732+ .more_checks = shm_more_checks
58733+};
58734+
58735 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
58736 {
58737 struct ipc_namespace *ns;
58738- struct ipc_ops shm_ops;
58739 struct ipc_params shm_params;
58740
58741 ns = current->nsproxy->ipc_ns;
58742
58743- shm_ops.getnew = newseg;
58744- shm_ops.associate = shm_security;
58745- shm_ops.more_checks = shm_more_checks;
58746-
58747 shm_params.key = key;
58748 shm_params.flg = shmflg;
58749 shm_params.u.size = size;
58750@@ -762,8 +779,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int,
58751 case SHM_LOCK:
58752 case SHM_UNLOCK:
58753 {
58754- struct file *uninitialized_var(shm_file);
58755-
58756 lru_add_drain_all(); /* drain pagevecs to lru lists */
58757
58758 shp = shm_lock_check(ns, shmid);
58759@@ -896,9 +911,21 @@ long do_shmat(int shmid, char __user *sh
58760 if (err)
58761 goto out_unlock;
58762
58763+#ifdef CONFIG_GRKERNSEC
58764+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
58765+ shp->shm_perm.cuid, shmid) ||
58766+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
58767+ err = -EACCES;
58768+ goto out_unlock;
58769+ }
58770+#endif
58771+
58772 path = shp->shm_file->f_path;
58773 path_get(&path);
58774 shp->shm_nattch++;
58775+#ifdef CONFIG_GRKERNSEC
58776+ shp->shm_lapid = current->pid;
58777+#endif
58778 size = i_size_read(path.dentry->d_inode);
58779 shm_unlock(shp);
58780
58781diff -urNp linux-3.0.4/kernel/acct.c linux-3.0.4/kernel/acct.c
58782--- linux-3.0.4/kernel/acct.c 2011-07-21 22:17:23.000000000 -0400
58783+++ linux-3.0.4/kernel/acct.c 2011-08-23 21:47:56.000000000 -0400
58784@@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_a
58785 */
58786 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
58787 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
58788- file->f_op->write(file, (char *)&ac,
58789+ file->f_op->write(file, (__force char __user *)&ac,
58790 sizeof(acct_t), &file->f_pos);
58791 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
58792 set_fs(fs);
58793diff -urNp linux-3.0.4/kernel/audit.c linux-3.0.4/kernel/audit.c
58794--- linux-3.0.4/kernel/audit.c 2011-07-21 22:17:23.000000000 -0400
58795+++ linux-3.0.4/kernel/audit.c 2011-08-23 21:47:56.000000000 -0400
58796@@ -112,7 +112,7 @@ u32 audit_sig_sid = 0;
58797 3) suppressed due to audit_rate_limit
58798 4) suppressed due to audit_backlog_limit
58799 */
58800-static atomic_t audit_lost = ATOMIC_INIT(0);
58801+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
58802
58803 /* The netlink socket. */
58804 static struct sock *audit_sock;
58805@@ -234,7 +234,7 @@ void audit_log_lost(const char *message)
58806 unsigned long now;
58807 int print;
58808
58809- atomic_inc(&audit_lost);
58810+ atomic_inc_unchecked(&audit_lost);
58811
58812 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
58813
58814@@ -253,7 +253,7 @@ void audit_log_lost(const char *message)
58815 printk(KERN_WARNING
58816 "audit: audit_lost=%d audit_rate_limit=%d "
58817 "audit_backlog_limit=%d\n",
58818- atomic_read(&audit_lost),
58819+ atomic_read_unchecked(&audit_lost),
58820 audit_rate_limit,
58821 audit_backlog_limit);
58822 audit_panic(message);
58823@@ -686,7 +686,7 @@ static int audit_receive_msg(struct sk_b
58824 status_set.pid = audit_pid;
58825 status_set.rate_limit = audit_rate_limit;
58826 status_set.backlog_limit = audit_backlog_limit;
58827- status_set.lost = atomic_read(&audit_lost);
58828+ status_set.lost = atomic_read_unchecked(&audit_lost);
58829 status_set.backlog = skb_queue_len(&audit_skb_queue);
58830 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
58831 &status_set, sizeof(status_set));
58832diff -urNp linux-3.0.4/kernel/auditsc.c linux-3.0.4/kernel/auditsc.c
58833--- linux-3.0.4/kernel/auditsc.c 2011-07-21 22:17:23.000000000 -0400
58834+++ linux-3.0.4/kernel/auditsc.c 2011-08-23 21:47:56.000000000 -0400
58835@@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_conte
58836 }
58837
58838 /* global counter which is incremented every time something logs in */
58839-static atomic_t session_id = ATOMIC_INIT(0);
58840+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
58841
58842 /**
58843 * audit_set_loginuid - set a task's audit_context loginuid
58844@@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT
58845 */
58846 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
58847 {
58848- unsigned int sessionid = atomic_inc_return(&session_id);
58849+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
58850 struct audit_context *context = task->audit_context;
58851
58852 if (context && context->in_syscall) {
58853diff -urNp linux-3.0.4/kernel/capability.c linux-3.0.4/kernel/capability.c
58854--- linux-3.0.4/kernel/capability.c 2011-07-21 22:17:23.000000000 -0400
58855+++ linux-3.0.4/kernel/capability.c 2011-08-23 21:48:14.000000000 -0400
58856@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
58857 * before modification is attempted and the application
58858 * fails.
58859 */
58860+ if (tocopy > ARRAY_SIZE(kdata))
58861+ return -EFAULT;
58862+
58863 if (copy_to_user(dataptr, kdata, tocopy
58864 * sizeof(struct __user_cap_data_struct))) {
58865 return -EFAULT;
58866@@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *n
58867 BUG();
58868 }
58869
58870- if (security_capable(ns, current_cred(), cap) == 0) {
58871+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
58872 current->flags |= PF_SUPERPRIV;
58873 return true;
58874 }
58875@@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *n
58876 }
58877 EXPORT_SYMBOL(ns_capable);
58878
58879+bool ns_capable_nolog(struct user_namespace *ns, int cap)
58880+{
58881+ if (unlikely(!cap_valid(cap))) {
58882+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
58883+ BUG();
58884+ }
58885+
58886+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
58887+ current->flags |= PF_SUPERPRIV;
58888+ return true;
58889+ }
58890+ return false;
58891+}
58892+EXPORT_SYMBOL(ns_capable_nolog);
58893+
58894+bool capable_nolog(int cap)
58895+{
58896+ return ns_capable_nolog(&init_user_ns, cap);
58897+}
58898+EXPORT_SYMBOL(capable_nolog);
58899+
58900 /**
58901 * task_ns_capable - Determine whether current task has a superior
58902 * capability targeted at a specific task's user namespace.
58903@@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct
58904 }
58905 EXPORT_SYMBOL(task_ns_capable);
58906
58907+bool task_ns_capable_nolog(struct task_struct *t, int cap)
58908+{
58909+ return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
58910+}
58911+EXPORT_SYMBOL(task_ns_capable_nolog);
58912+
58913 /**
58914 * nsown_capable - Check superior capability to one's own user_ns
58915 * @cap: The capability in question
58916diff -urNp linux-3.0.4/kernel/cgroup.c linux-3.0.4/kernel/cgroup.c
58917--- linux-3.0.4/kernel/cgroup.c 2011-07-21 22:17:23.000000000 -0400
58918+++ linux-3.0.4/kernel/cgroup.c 2011-08-23 21:48:14.000000000 -0400
58919@@ -593,6 +593,8 @@ static struct css_set *find_css_set(
58920 struct hlist_head *hhead;
58921 struct cg_cgroup_link *link;
58922
58923+ pax_track_stack();
58924+
58925 /* First see if we already have a cgroup group that matches
58926 * the desired set */
58927 read_lock(&css_set_lock);
58928diff -urNp linux-3.0.4/kernel/compat.c linux-3.0.4/kernel/compat.c
58929--- linux-3.0.4/kernel/compat.c 2011-07-21 22:17:23.000000000 -0400
58930+++ linux-3.0.4/kernel/compat.c 2011-08-23 21:48:14.000000000 -0400
58931@@ -13,6 +13,7 @@
58932
58933 #include <linux/linkage.h>
58934 #include <linux/compat.h>
58935+#include <linux/module.h>
58936 #include <linux/errno.h>
58937 #include <linux/time.h>
58938 #include <linux/signal.h>
58939diff -urNp linux-3.0.4/kernel/configs.c linux-3.0.4/kernel/configs.c
58940--- linux-3.0.4/kernel/configs.c 2011-07-21 22:17:23.000000000 -0400
58941+++ linux-3.0.4/kernel/configs.c 2011-08-23 21:48:14.000000000 -0400
58942@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
58943 struct proc_dir_entry *entry;
58944
58945 /* create the current config file */
58946+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
58947+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
58948+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
58949+ &ikconfig_file_ops);
58950+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58951+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
58952+ &ikconfig_file_ops);
58953+#endif
58954+#else
58955 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
58956 &ikconfig_file_ops);
58957+#endif
58958+
58959 if (!entry)
58960 return -ENOMEM;
58961
58962diff -urNp linux-3.0.4/kernel/cred.c linux-3.0.4/kernel/cred.c
58963--- linux-3.0.4/kernel/cred.c 2011-07-21 22:17:23.000000000 -0400
58964+++ linux-3.0.4/kernel/cred.c 2011-08-25 17:23:03.000000000 -0400
58965@@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head
58966 */
58967 void __put_cred(struct cred *cred)
58968 {
58969+ pax_track_stack();
58970+
58971 kdebug("__put_cred(%p{%d,%d})", cred,
58972 atomic_read(&cred->usage),
58973 read_cred_subscribers(cred));
58974@@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
58975 {
58976 struct cred *cred;
58977
58978+ pax_track_stack();
58979+
58980 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
58981 atomic_read(&tsk->cred->usage),
58982 read_cred_subscribers(tsk->cred));
58983@@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct
58984 {
58985 const struct cred *cred;
58986
58987+ pax_track_stack();
58988+
58989 rcu_read_lock();
58990
58991 do {
58992@@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
58993 {
58994 struct cred *new;
58995
58996+ pax_track_stack();
58997+
58998 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
58999 if (!new)
59000 return NULL;
59001@@ -287,6 +295,8 @@ struct cred *prepare_creds(void)
59002 const struct cred *old;
59003 struct cred *new;
59004
59005+ pax_track_stack();
59006+
59007 validate_process_creds();
59008
59009 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
59010@@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
59011 struct thread_group_cred *tgcred = NULL;
59012 struct cred *new;
59013
59014+ pax_track_stack();
59015+
59016 #ifdef CONFIG_KEYS
59017 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
59018 if (!tgcred)
59019@@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, un
59020 struct cred *new;
59021 int ret;
59022
59023+ pax_track_stack();
59024+
59025 if (
59026 #ifdef CONFIG_KEYS
59027 !p->cred->thread_keyring &&
59028@@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
59029 struct task_struct *task = current;
59030 const struct cred *old = task->real_cred;
59031
59032+ pax_track_stack();
59033+
59034 kdebug("commit_creds(%p{%d,%d})", new,
59035 atomic_read(&new->usage),
59036 read_cred_subscribers(new));
59037@@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
59038
59039 get_cred(new); /* we will require a ref for the subj creds too */
59040
59041+ gr_set_role_label(task, new->uid, new->gid);
59042+
59043 /* dumpability changes */
59044 if (old->euid != new->euid ||
59045 old->egid != new->egid ||
59046@@ -508,10 +526,8 @@ int commit_creds(struct cred *new)
59047 key_fsgid_changed(task);
59048
59049 /* do it
59050- * - What if a process setreuid()'s and this brings the
59051- * new uid over his NPROC rlimit? We can check this now
59052- * cheaply with the new uid cache, so if it matters
59053- * we should be checking for it. -DaveM
59054+ * RLIMIT_NPROC limits on user->processes have already been checked
59055+ * in set_user().
59056 */
59057 alter_cred_subscribers(new, 2);
59058 if (new->user != old->user)
59059@@ -551,6 +567,8 @@ EXPORT_SYMBOL(commit_creds);
59060 */
59061 void abort_creds(struct cred *new)
59062 {
59063+ pax_track_stack();
59064+
59065 kdebug("abort_creds(%p{%d,%d})", new,
59066 atomic_read(&new->usage),
59067 read_cred_subscribers(new));
59068@@ -574,6 +592,8 @@ const struct cred *override_creds(const
59069 {
59070 const struct cred *old = current->cred;
59071
59072+ pax_track_stack();
59073+
59074 kdebug("override_creds(%p{%d,%d})", new,
59075 atomic_read(&new->usage),
59076 read_cred_subscribers(new));
59077@@ -603,6 +623,8 @@ void revert_creds(const struct cred *old
59078 {
59079 const struct cred *override = current->cred;
59080
59081+ pax_track_stack();
59082+
59083 kdebug("revert_creds(%p{%d,%d})", old,
59084 atomic_read(&old->usage),
59085 read_cred_subscribers(old));
59086@@ -649,6 +671,8 @@ struct cred *prepare_kernel_cred(struct
59087 const struct cred *old;
59088 struct cred *new;
59089
59090+ pax_track_stack();
59091+
59092 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
59093 if (!new)
59094 return NULL;
59095@@ -703,6 +727,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
59096 */
59097 int set_security_override(struct cred *new, u32 secid)
59098 {
59099+ pax_track_stack();
59100+
59101 return security_kernel_act_as(new, secid);
59102 }
59103 EXPORT_SYMBOL(set_security_override);
59104@@ -722,6 +748,8 @@ int set_security_override_from_ctx(struc
59105 u32 secid;
59106 int ret;
59107
59108+ pax_track_stack();
59109+
59110 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
59111 if (ret < 0)
59112 return ret;
59113diff -urNp linux-3.0.4/kernel/debug/debug_core.c linux-3.0.4/kernel/debug/debug_core.c
59114--- linux-3.0.4/kernel/debug/debug_core.c 2011-07-21 22:17:23.000000000 -0400
59115+++ linux-3.0.4/kernel/debug/debug_core.c 2011-08-23 21:47:56.000000000 -0400
59116@@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_loc
59117 */
59118 static atomic_t masters_in_kgdb;
59119 static atomic_t slaves_in_kgdb;
59120-static atomic_t kgdb_break_tasklet_var;
59121+static atomic_unchecked_t kgdb_break_tasklet_var;
59122 atomic_t kgdb_setting_breakpoint;
59123
59124 struct task_struct *kgdb_usethread;
59125@@ -129,7 +129,7 @@ int kgdb_single_step;
59126 static pid_t kgdb_sstep_pid;
59127
59128 /* to keep track of the CPU which is doing the single stepping*/
59129-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
59130+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
59131
59132 /*
59133 * If you are debugging a problem where roundup (the collection of
59134@@ -542,7 +542,7 @@ return_normal:
59135 * kernel will only try for the value of sstep_tries before
59136 * giving up and continuing on.
59137 */
59138- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
59139+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
59140 (kgdb_info[cpu].task &&
59141 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
59142 atomic_set(&kgdb_active, -1);
59143@@ -636,8 +636,8 @@ cpu_master_loop:
59144 }
59145
59146 kgdb_restore:
59147- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
59148- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
59149+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
59150+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
59151 if (kgdb_info[sstep_cpu].task)
59152 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
59153 else
59154@@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(vo
59155 static void kgdb_tasklet_bpt(unsigned long ing)
59156 {
59157 kgdb_breakpoint();
59158- atomic_set(&kgdb_break_tasklet_var, 0);
59159+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
59160 }
59161
59162 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
59163
59164 void kgdb_schedule_breakpoint(void)
59165 {
59166- if (atomic_read(&kgdb_break_tasklet_var) ||
59167+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
59168 atomic_read(&kgdb_active) != -1 ||
59169 atomic_read(&kgdb_setting_breakpoint))
59170 return;
59171- atomic_inc(&kgdb_break_tasklet_var);
59172+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
59173 tasklet_schedule(&kgdb_tasklet_breakpoint);
59174 }
59175 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
59176diff -urNp linux-3.0.4/kernel/debug/kdb/kdb_main.c linux-3.0.4/kernel/debug/kdb/kdb_main.c
59177--- linux-3.0.4/kernel/debug/kdb/kdb_main.c 2011-07-21 22:17:23.000000000 -0400
59178+++ linux-3.0.4/kernel/debug/kdb/kdb_main.c 2011-08-23 21:47:56.000000000 -0400
59179@@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const cha
59180 list_for_each_entry(mod, kdb_modules, list) {
59181
59182 kdb_printf("%-20s%8u 0x%p ", mod->name,
59183- mod->core_size, (void *)mod);
59184+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
59185 #ifdef CONFIG_MODULE_UNLOAD
59186 kdb_printf("%4d ", module_refcount(mod));
59187 #endif
59188@@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const cha
59189 kdb_printf(" (Loading)");
59190 else
59191 kdb_printf(" (Live)");
59192- kdb_printf(" 0x%p", mod->module_core);
59193+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
59194
59195 #ifdef CONFIG_MODULE_UNLOAD
59196 {
59197diff -urNp linux-3.0.4/kernel/events/core.c linux-3.0.4/kernel/events/core.c
59198--- linux-3.0.4/kernel/events/core.c 2011-09-02 18:11:21.000000000 -0400
59199+++ linux-3.0.4/kernel/events/core.c 2011-09-14 09:08:05.000000000 -0400
59200@@ -170,7 +170,7 @@ int perf_proc_update_handler(struct ctl_
59201 return 0;
59202 }
59203
59204-static atomic64_t perf_event_id;
59205+static atomic64_unchecked_t perf_event_id;
59206
59207 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
59208 enum event_type_t event_type);
59209@@ -2488,7 +2488,7 @@ static void __perf_event_read(void *info
59210
59211 static inline u64 perf_event_count(struct perf_event *event)
59212 {
59213- return local64_read(&event->count) + atomic64_read(&event->child_count);
59214+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
59215 }
59216
59217 static u64 perf_event_read(struct perf_event *event)
59218@@ -3023,9 +3023,9 @@ u64 perf_event_read_value(struct perf_ev
59219 mutex_lock(&event->child_mutex);
59220 total += perf_event_read(event);
59221 *enabled += event->total_time_enabled +
59222- atomic64_read(&event->child_total_time_enabled);
59223+ atomic64_read_unchecked(&event->child_total_time_enabled);
59224 *running += event->total_time_running +
59225- atomic64_read(&event->child_total_time_running);
59226+ atomic64_read_unchecked(&event->child_total_time_running);
59227
59228 list_for_each_entry(child, &event->child_list, child_list) {
59229 total += perf_event_read(child);
59230@@ -3388,10 +3388,10 @@ void perf_event_update_userpage(struct p
59231 userpg->offset -= local64_read(&event->hw.prev_count);
59232
59233 userpg->time_enabled = event->total_time_enabled +
59234- atomic64_read(&event->child_total_time_enabled);
59235+ atomic64_read_unchecked(&event->child_total_time_enabled);
59236
59237 userpg->time_running = event->total_time_running +
59238- atomic64_read(&event->child_total_time_running);
59239+ atomic64_read_unchecked(&event->child_total_time_running);
59240
59241 barrier();
59242 ++userpg->lock;
59243@@ -4188,11 +4188,11 @@ static void perf_output_read_one(struct
59244 values[n++] = perf_event_count(event);
59245 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
59246 values[n++] = enabled +
59247- atomic64_read(&event->child_total_time_enabled);
59248+ atomic64_read_unchecked(&event->child_total_time_enabled);
59249 }
59250 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
59251 values[n++] = running +
59252- atomic64_read(&event->child_total_time_running);
59253+ atomic64_read_unchecked(&event->child_total_time_running);
59254 }
59255 if (read_format & PERF_FORMAT_ID)
59256 values[n++] = primary_event_id(event);
59257@@ -4833,12 +4833,12 @@ static void perf_event_mmap_event(struct
59258 * need to add enough zero bytes after the string to handle
59259 * the 64bit alignment we do later.
59260 */
59261- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
59262+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
59263 if (!buf) {
59264 name = strncpy(tmp, "//enomem", sizeof(tmp));
59265 goto got_name;
59266 }
59267- name = d_path(&file->f_path, buf, PATH_MAX);
59268+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
59269 if (IS_ERR(name)) {
59270 name = strncpy(tmp, "//toolong", sizeof(tmp));
59271 goto got_name;
59272@@ -6190,7 +6190,7 @@ perf_event_alloc(struct perf_event_attr
59273 event->parent = parent_event;
59274
59275 event->ns = get_pid_ns(current->nsproxy->pid_ns);
59276- event->id = atomic64_inc_return(&perf_event_id);
59277+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
59278
59279 event->state = PERF_EVENT_STATE_INACTIVE;
59280
59281@@ -6713,10 +6713,10 @@ static void sync_child_event(struct perf
59282 /*
59283 * Add back the child's count to the parent's count:
59284 */
59285- atomic64_add(child_val, &parent_event->child_count);
59286- atomic64_add(child_event->total_time_enabled,
59287+ atomic64_add_unchecked(child_val, &parent_event->child_count);
59288+ atomic64_add_unchecked(child_event->total_time_enabled,
59289 &parent_event->child_total_time_enabled);
59290- atomic64_add(child_event->total_time_running,
59291+ atomic64_add_unchecked(child_event->total_time_running,
59292 &parent_event->child_total_time_running);
59293
59294 /*
59295diff -urNp linux-3.0.4/kernel/exit.c linux-3.0.4/kernel/exit.c
59296--- linux-3.0.4/kernel/exit.c 2011-07-21 22:17:23.000000000 -0400
59297+++ linux-3.0.4/kernel/exit.c 2011-08-23 21:48:14.000000000 -0400
59298@@ -57,6 +57,10 @@
59299 #include <asm/pgtable.h>
59300 #include <asm/mmu_context.h>
59301
59302+#ifdef CONFIG_GRKERNSEC
59303+extern rwlock_t grsec_exec_file_lock;
59304+#endif
59305+
59306 static void exit_mm(struct task_struct * tsk);
59307
59308 static void __unhash_process(struct task_struct *p, bool group_dead)
59309@@ -169,6 +173,10 @@ void release_task(struct task_struct * p
59310 struct task_struct *leader;
59311 int zap_leader;
59312 repeat:
59313+#ifdef CONFIG_NET
59314+ gr_del_task_from_ip_table(p);
59315+#endif
59316+
59317 tracehook_prepare_release_task(p);
59318 /* don't need to get the RCU readlock here - the process is dead and
59319 * can't be modifying its own credentials. But shut RCU-lockdep up */
59320@@ -338,11 +346,22 @@ static void reparent_to_kthreadd(void)
59321 {
59322 write_lock_irq(&tasklist_lock);
59323
59324+#ifdef CONFIG_GRKERNSEC
59325+ write_lock(&grsec_exec_file_lock);
59326+ if (current->exec_file) {
59327+ fput(current->exec_file);
59328+ current->exec_file = NULL;
59329+ }
59330+ write_unlock(&grsec_exec_file_lock);
59331+#endif
59332+
59333 ptrace_unlink(current);
59334 /* Reparent to init */
59335 current->real_parent = current->parent = kthreadd_task;
59336 list_move_tail(&current->sibling, &current->real_parent->children);
59337
59338+ gr_set_kernel_label(current);
59339+
59340 /* Set the exit signal to SIGCHLD so we signal init on exit */
59341 current->exit_signal = SIGCHLD;
59342
59343@@ -394,7 +413,7 @@ int allow_signal(int sig)
59344 * know it'll be handled, so that they don't get converted to
59345 * SIGKILL or just silently dropped.
59346 */
59347- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
59348+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
59349 recalc_sigpending();
59350 spin_unlock_irq(&current->sighand->siglock);
59351 return 0;
59352@@ -430,6 +449,17 @@ void daemonize(const char *name, ...)
59353 vsnprintf(current->comm, sizeof(current->comm), name, args);
59354 va_end(args);
59355
59356+#ifdef CONFIG_GRKERNSEC
59357+ write_lock(&grsec_exec_file_lock);
59358+ if (current->exec_file) {
59359+ fput(current->exec_file);
59360+ current->exec_file = NULL;
59361+ }
59362+ write_unlock(&grsec_exec_file_lock);
59363+#endif
59364+
59365+ gr_set_kernel_label(current);
59366+
59367 /*
59368 * If we were started as result of loading a module, close all of the
59369 * user space pages. We don't need them, and if we didn't close them
59370@@ -904,15 +934,8 @@ NORET_TYPE void do_exit(long code)
59371 struct task_struct *tsk = current;
59372 int group_dead;
59373
59374- profile_task_exit(tsk);
59375-
59376- WARN_ON(atomic_read(&tsk->fs_excl));
59377- WARN_ON(blk_needs_flush_plug(tsk));
59378-
59379 if (unlikely(in_interrupt()))
59380 panic("Aiee, killing interrupt handler!");
59381- if (unlikely(!tsk->pid))
59382- panic("Attempted to kill the idle task!");
59383
59384 /*
59385 * If do_exit is called because this processes oopsed, it's possible
59386@@ -923,6 +946,14 @@ NORET_TYPE void do_exit(long code)
59387 */
59388 set_fs(USER_DS);
59389
59390+ profile_task_exit(tsk);
59391+
59392+ WARN_ON(atomic_read(&tsk->fs_excl));
59393+ WARN_ON(blk_needs_flush_plug(tsk));
59394+
59395+ if (unlikely(!tsk->pid))
59396+ panic("Attempted to kill the idle task!");
59397+
59398 tracehook_report_exit(&code);
59399
59400 validate_creds_for_do_exit(tsk);
59401@@ -983,6 +1014,9 @@ NORET_TYPE void do_exit(long code)
59402 tsk->exit_code = code;
59403 taskstats_exit(tsk, group_dead);
59404
59405+ gr_acl_handle_psacct(tsk, code);
59406+ gr_acl_handle_exit();
59407+
59408 exit_mm(tsk);
59409
59410 if (group_dead)
59411diff -urNp linux-3.0.4/kernel/fork.c linux-3.0.4/kernel/fork.c
59412--- linux-3.0.4/kernel/fork.c 2011-07-21 22:17:23.000000000 -0400
59413+++ linux-3.0.4/kernel/fork.c 2011-08-25 17:23:36.000000000 -0400
59414@@ -286,7 +286,7 @@ static struct task_struct *dup_task_stru
59415 *stackend = STACK_END_MAGIC; /* for overflow detection */
59416
59417 #ifdef CONFIG_CC_STACKPROTECTOR
59418- tsk->stack_canary = get_random_int();
59419+ tsk->stack_canary = pax_get_random_long();
59420 #endif
59421
59422 /* One for us, one for whoever does the "release_task()" (usually parent) */
59423@@ -308,13 +308,77 @@ out:
59424 }
59425
59426 #ifdef CONFIG_MMU
59427+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
59428+{
59429+ struct vm_area_struct *tmp;
59430+ unsigned long charge;
59431+ struct mempolicy *pol;
59432+ struct file *file;
59433+
59434+ charge = 0;
59435+ if (mpnt->vm_flags & VM_ACCOUNT) {
59436+ unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
59437+ if (security_vm_enough_memory(len))
59438+ goto fail_nomem;
59439+ charge = len;
59440+ }
59441+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
59442+ if (!tmp)
59443+ goto fail_nomem;
59444+ *tmp = *mpnt;
59445+ tmp->vm_mm = mm;
59446+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
59447+ pol = mpol_dup(vma_policy(mpnt));
59448+ if (IS_ERR(pol))
59449+ goto fail_nomem_policy;
59450+ vma_set_policy(tmp, pol);
59451+ if (anon_vma_fork(tmp, mpnt))
59452+ goto fail_nomem_anon_vma_fork;
59453+ tmp->vm_flags &= ~VM_LOCKED;
59454+ tmp->vm_next = tmp->vm_prev = NULL;
59455+ tmp->vm_mirror = NULL;
59456+ file = tmp->vm_file;
59457+ if (file) {
59458+ struct inode *inode = file->f_path.dentry->d_inode;
59459+ struct address_space *mapping = file->f_mapping;
59460+
59461+ get_file(file);
59462+ if (tmp->vm_flags & VM_DENYWRITE)
59463+ atomic_dec(&inode->i_writecount);
59464+ mutex_lock(&mapping->i_mmap_mutex);
59465+ if (tmp->vm_flags & VM_SHARED)
59466+ mapping->i_mmap_writable++;
59467+ flush_dcache_mmap_lock(mapping);
59468+ /* insert tmp into the share list, just after mpnt */
59469+ vma_prio_tree_add(tmp, mpnt);
59470+ flush_dcache_mmap_unlock(mapping);
59471+ mutex_unlock(&mapping->i_mmap_mutex);
59472+ }
59473+
59474+ /*
59475+ * Clear hugetlb-related page reserves for children. This only
59476+ * affects MAP_PRIVATE mappings. Faults generated by the child
59477+ * are not guaranteed to succeed, even if read-only
59478+ */
59479+ if (is_vm_hugetlb_page(tmp))
59480+ reset_vma_resv_huge_pages(tmp);
59481+
59482+ return tmp;
59483+
59484+fail_nomem_anon_vma_fork:
59485+ mpol_put(pol);
59486+fail_nomem_policy:
59487+ kmem_cache_free(vm_area_cachep, tmp);
59488+fail_nomem:
59489+ vm_unacct_memory(charge);
59490+ return NULL;
59491+}
59492+
59493 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
59494 {
59495 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
59496 struct rb_node **rb_link, *rb_parent;
59497 int retval;
59498- unsigned long charge;
59499- struct mempolicy *pol;
59500
59501 down_write(&oldmm->mmap_sem);
59502 flush_cache_dup_mm(oldmm);
59503@@ -326,8 +390,8 @@ static int dup_mmap(struct mm_struct *mm
59504 mm->locked_vm = 0;
59505 mm->mmap = NULL;
59506 mm->mmap_cache = NULL;
59507- mm->free_area_cache = oldmm->mmap_base;
59508- mm->cached_hole_size = ~0UL;
59509+ mm->free_area_cache = oldmm->free_area_cache;
59510+ mm->cached_hole_size = oldmm->cached_hole_size;
59511 mm->map_count = 0;
59512 cpumask_clear(mm_cpumask(mm));
59513 mm->mm_rb = RB_ROOT;
59514@@ -343,8 +407,6 @@ static int dup_mmap(struct mm_struct *mm
59515
59516 prev = NULL;
59517 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
59518- struct file *file;
59519-
59520 if (mpnt->vm_flags & VM_DONTCOPY) {
59521 long pages = vma_pages(mpnt);
59522 mm->total_vm -= pages;
59523@@ -352,55 +414,13 @@ static int dup_mmap(struct mm_struct *mm
59524 -pages);
59525 continue;
59526 }
59527- charge = 0;
59528- if (mpnt->vm_flags & VM_ACCOUNT) {
59529- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
59530- if (security_vm_enough_memory(len))
59531- goto fail_nomem;
59532- charge = len;
59533- }
59534- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
59535- if (!tmp)
59536- goto fail_nomem;
59537- *tmp = *mpnt;
59538- INIT_LIST_HEAD(&tmp->anon_vma_chain);
59539- pol = mpol_dup(vma_policy(mpnt));
59540- retval = PTR_ERR(pol);
59541- if (IS_ERR(pol))
59542- goto fail_nomem_policy;
59543- vma_set_policy(tmp, pol);
59544- tmp->vm_mm = mm;
59545- if (anon_vma_fork(tmp, mpnt))
59546- goto fail_nomem_anon_vma_fork;
59547- tmp->vm_flags &= ~VM_LOCKED;
59548- tmp->vm_next = tmp->vm_prev = NULL;
59549- file = tmp->vm_file;
59550- if (file) {
59551- struct inode *inode = file->f_path.dentry->d_inode;
59552- struct address_space *mapping = file->f_mapping;
59553-
59554- get_file(file);
59555- if (tmp->vm_flags & VM_DENYWRITE)
59556- atomic_dec(&inode->i_writecount);
59557- mutex_lock(&mapping->i_mmap_mutex);
59558- if (tmp->vm_flags & VM_SHARED)
59559- mapping->i_mmap_writable++;
59560- flush_dcache_mmap_lock(mapping);
59561- /* insert tmp into the share list, just after mpnt */
59562- vma_prio_tree_add(tmp, mpnt);
59563- flush_dcache_mmap_unlock(mapping);
59564- mutex_unlock(&mapping->i_mmap_mutex);
59565+ tmp = dup_vma(mm, mpnt);
59566+ if (!tmp) {
59567+ retval = -ENOMEM;
59568+ goto out;
59569 }
59570
59571 /*
59572- * Clear hugetlb-related page reserves for children. This only
59573- * affects MAP_PRIVATE mappings. Faults generated by the child
59574- * are not guaranteed to succeed, even if read-only
59575- */
59576- if (is_vm_hugetlb_page(tmp))
59577- reset_vma_resv_huge_pages(tmp);
59578-
59579- /*
59580 * Link in the new vma and copy the page table entries.
59581 */
59582 *pprev = tmp;
59583@@ -421,6 +441,31 @@ static int dup_mmap(struct mm_struct *mm
59584 if (retval)
59585 goto out;
59586 }
59587+
59588+#ifdef CONFIG_PAX_SEGMEXEC
59589+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
59590+ struct vm_area_struct *mpnt_m;
59591+
59592+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
59593+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
59594+
59595+ if (!mpnt->vm_mirror)
59596+ continue;
59597+
59598+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
59599+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
59600+ mpnt->vm_mirror = mpnt_m;
59601+ } else {
59602+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
59603+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
59604+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
59605+ mpnt->vm_mirror->vm_mirror = mpnt;
59606+ }
59607+ }
59608+ BUG_ON(mpnt_m);
59609+ }
59610+#endif
59611+
59612 /* a new mm has just been created */
59613 arch_dup_mmap(oldmm, mm);
59614 retval = 0;
59615@@ -429,14 +474,6 @@ out:
59616 flush_tlb_mm(oldmm);
59617 up_write(&oldmm->mmap_sem);
59618 return retval;
59619-fail_nomem_anon_vma_fork:
59620- mpol_put(pol);
59621-fail_nomem_policy:
59622- kmem_cache_free(vm_area_cachep, tmp);
59623-fail_nomem:
59624- retval = -ENOMEM;
59625- vm_unacct_memory(charge);
59626- goto out;
59627 }
59628
59629 static inline int mm_alloc_pgd(struct mm_struct * mm)
59630@@ -836,13 +873,14 @@ static int copy_fs(unsigned long clone_f
59631 spin_unlock(&fs->lock);
59632 return -EAGAIN;
59633 }
59634- fs->users++;
59635+ atomic_inc(&fs->users);
59636 spin_unlock(&fs->lock);
59637 return 0;
59638 }
59639 tsk->fs = copy_fs_struct(fs);
59640 if (!tsk->fs)
59641 return -ENOMEM;
59642+ gr_set_chroot_entries(tsk, &tsk->fs->root);
59643 return 0;
59644 }
59645
59646@@ -1104,12 +1142,16 @@ static struct task_struct *copy_process(
59647 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
59648 #endif
59649 retval = -EAGAIN;
59650+
59651+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
59652+
59653 if (atomic_read(&p->real_cred->user->processes) >=
59654 task_rlimit(p, RLIMIT_NPROC)) {
59655- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
59656- p->real_cred->user != INIT_USER)
59657+ if (p->real_cred->user != INIT_USER &&
59658+ !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
59659 goto bad_fork_free;
59660 }
59661+ current->flags &= ~PF_NPROC_EXCEEDED;
59662
59663 retval = copy_creds(p, clone_flags);
59664 if (retval < 0)
59665@@ -1250,6 +1292,8 @@ static struct task_struct *copy_process(
59666 if (clone_flags & CLONE_THREAD)
59667 p->tgid = current->tgid;
59668
59669+ gr_copy_label(p);
59670+
59671 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
59672 /*
59673 * Clear TID on mm_release()?
59674@@ -1414,6 +1458,8 @@ bad_fork_cleanup_count:
59675 bad_fork_free:
59676 free_task(p);
59677 fork_out:
59678+ gr_log_forkfail(retval);
59679+
59680 return ERR_PTR(retval);
59681 }
59682
59683@@ -1502,6 +1548,8 @@ long do_fork(unsigned long clone_flags,
59684 if (clone_flags & CLONE_PARENT_SETTID)
59685 put_user(nr, parent_tidptr);
59686
59687+ gr_handle_brute_check();
59688+
59689 if (clone_flags & CLONE_VFORK) {
59690 p->vfork_done = &vfork;
59691 init_completion(&vfork);
59692@@ -1610,7 +1658,7 @@ static int unshare_fs(unsigned long unsh
59693 return 0;
59694
59695 /* don't need lock here; in the worst case we'll do useless copy */
59696- if (fs->users == 1)
59697+ if (atomic_read(&fs->users) == 1)
59698 return 0;
59699
59700 *new_fsp = copy_fs_struct(fs);
59701@@ -1697,7 +1745,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
59702 fs = current->fs;
59703 spin_lock(&fs->lock);
59704 current->fs = new_fs;
59705- if (--fs->users)
59706+ gr_set_chroot_entries(current, &current->fs->root);
59707+ if (atomic_dec_return(&fs->users))
59708 new_fs = NULL;
59709 else
59710 new_fs = fs;
59711diff -urNp linux-3.0.4/kernel/futex.c linux-3.0.4/kernel/futex.c
59712--- linux-3.0.4/kernel/futex.c 2011-09-02 18:11:21.000000000 -0400
59713+++ linux-3.0.4/kernel/futex.c 2011-08-23 21:48:14.000000000 -0400
59714@@ -54,6 +54,7 @@
59715 #include <linux/mount.h>
59716 #include <linux/pagemap.h>
59717 #include <linux/syscalls.h>
59718+#include <linux/ptrace.h>
59719 #include <linux/signal.h>
59720 #include <linux/module.h>
59721 #include <linux/magic.h>
59722@@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fsh
59723 struct page *page, *page_head;
59724 int err, ro = 0;
59725
59726+#ifdef CONFIG_PAX_SEGMEXEC
59727+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
59728+ return -EFAULT;
59729+#endif
59730+
59731 /*
59732 * The futex address must be "naturally" aligned.
59733 */
59734@@ -1863,6 +1869,8 @@ static int futex_wait(u32 __user *uaddr,
59735 struct futex_q q = futex_q_init;
59736 int ret;
59737
59738+ pax_track_stack();
59739+
59740 if (!bitset)
59741 return -EINVAL;
59742 q.bitset = bitset;
59743@@ -2259,6 +2267,8 @@ static int futex_wait_requeue_pi(u32 __u
59744 struct futex_q q = futex_q_init;
59745 int res, ret;
59746
59747+ pax_track_stack();
59748+
59749 if (!bitset)
59750 return -EINVAL;
59751
59752@@ -2431,7 +2441,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
59753 {
59754 struct robust_list_head __user *head;
59755 unsigned long ret;
59756+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
59757 const struct cred *cred = current_cred(), *pcred;
59758+#endif
59759
59760 if (!futex_cmpxchg_enabled)
59761 return -ENOSYS;
59762@@ -2447,6 +2459,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
59763 if (!p)
59764 goto err_unlock;
59765 ret = -EPERM;
59766+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59767+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
59768+ goto err_unlock;
59769+#else
59770 pcred = __task_cred(p);
59771 /* If victim is in different user_ns, then uids are not
59772 comparable, so we must have CAP_SYS_PTRACE */
59773@@ -2461,6 +2477,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
59774 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
59775 goto err_unlock;
59776 ok:
59777+#endif
59778 head = p->robust_list;
59779 rcu_read_unlock();
59780 }
59781@@ -2712,6 +2729,7 @@ static int __init futex_init(void)
59782 {
59783 u32 curval;
59784 int i;
59785+ mm_segment_t oldfs;
59786
59787 /*
59788 * This will fail and we want it. Some arch implementations do
59789@@ -2723,8 +2741,11 @@ static int __init futex_init(void)
59790 * implementation, the non-functional ones will return
59791 * -ENOSYS.
59792 */
59793+ oldfs = get_fs();
59794+ set_fs(USER_DS);
59795 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
59796 futex_cmpxchg_enabled = 1;
59797+ set_fs(oldfs);
59798
59799 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
59800 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
59801diff -urNp linux-3.0.4/kernel/futex_compat.c linux-3.0.4/kernel/futex_compat.c
59802--- linux-3.0.4/kernel/futex_compat.c 2011-07-21 22:17:23.000000000 -0400
59803+++ linux-3.0.4/kernel/futex_compat.c 2011-08-23 21:48:14.000000000 -0400
59804@@ -10,6 +10,7 @@
59805 #include <linux/compat.h>
59806 #include <linux/nsproxy.h>
59807 #include <linux/futex.h>
59808+#include <linux/ptrace.h>
59809
59810 #include <asm/uaccess.h>
59811
59812@@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, comp
59813 {
59814 struct compat_robust_list_head __user *head;
59815 unsigned long ret;
59816- const struct cred *cred = current_cred(), *pcred;
59817+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
59818+ const struct cred *cred = current_cred();
59819+ const struct cred *pcred;
59820+#endif
59821
59822 if (!futex_cmpxchg_enabled)
59823 return -ENOSYS;
59824@@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, comp
59825 if (!p)
59826 goto err_unlock;
59827 ret = -EPERM;
59828+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59829+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
59830+ goto err_unlock;
59831+#else
59832 pcred = __task_cred(p);
59833 /* If victim is in different user_ns, then uids are not
59834 comparable, so we must have CAP_SYS_PTRACE */
59835@@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, comp
59836 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
59837 goto err_unlock;
59838 ok:
59839+#endif
59840 head = p->compat_robust_list;
59841 rcu_read_unlock();
59842 }
59843diff -urNp linux-3.0.4/kernel/gcov/base.c linux-3.0.4/kernel/gcov/base.c
59844--- linux-3.0.4/kernel/gcov/base.c 2011-07-21 22:17:23.000000000 -0400
59845+++ linux-3.0.4/kernel/gcov/base.c 2011-08-23 21:47:56.000000000 -0400
59846@@ -102,11 +102,6 @@ void gcov_enable_events(void)
59847 }
59848
59849 #ifdef CONFIG_MODULES
59850-static inline int within(void *addr, void *start, unsigned long size)
59851-{
59852- return ((addr >= start) && (addr < start + size));
59853-}
59854-
59855 /* Update list and generate events when modules are unloaded. */
59856 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
59857 void *data)
59858@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
59859 prev = NULL;
59860 /* Remove entries located in module from linked list. */
59861 for (info = gcov_info_head; info; info = info->next) {
59862- if (within(info, mod->module_core, mod->core_size)) {
59863+ if (within_module_core_rw((unsigned long)info, mod)) {
59864 if (prev)
59865 prev->next = info->next;
59866 else
59867diff -urNp linux-3.0.4/kernel/hrtimer.c linux-3.0.4/kernel/hrtimer.c
59868--- linux-3.0.4/kernel/hrtimer.c 2011-07-21 22:17:23.000000000 -0400
59869+++ linux-3.0.4/kernel/hrtimer.c 2011-08-23 21:47:56.000000000 -0400
59870@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
59871 local_irq_restore(flags);
59872 }
59873
59874-static void run_hrtimer_softirq(struct softirq_action *h)
59875+static void run_hrtimer_softirq(void)
59876 {
59877 hrtimer_peek_ahead_timers();
59878 }
59879diff -urNp linux-3.0.4/kernel/jump_label.c linux-3.0.4/kernel/jump_label.c
59880--- linux-3.0.4/kernel/jump_label.c 2011-07-21 22:17:23.000000000 -0400
59881+++ linux-3.0.4/kernel/jump_label.c 2011-08-23 21:47:56.000000000 -0400
59882@@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entr
59883
59884 size = (((unsigned long)stop - (unsigned long)start)
59885 / sizeof(struct jump_entry));
59886+ pax_open_kernel();
59887 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
59888+ pax_close_kernel();
59889 }
59890
59891 static void jump_label_update(struct jump_label_key *key, int enable);
59892@@ -297,10 +299,12 @@ static void jump_label_invalidate_module
59893 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
59894 struct jump_entry *iter;
59895
59896+ pax_open_kernel();
59897 for (iter = iter_start; iter < iter_stop; iter++) {
59898 if (within_module_init(iter->code, mod))
59899 iter->code = 0;
59900 }
59901+ pax_close_kernel();
59902 }
59903
59904 static int
59905diff -urNp linux-3.0.4/kernel/kallsyms.c linux-3.0.4/kernel/kallsyms.c
59906--- linux-3.0.4/kernel/kallsyms.c 2011-07-21 22:17:23.000000000 -0400
59907+++ linux-3.0.4/kernel/kallsyms.c 2011-08-23 21:48:14.000000000 -0400
59908@@ -11,6 +11,9 @@
59909 * Changed the compression method from stem compression to "table lookup"
59910 * compression (see scripts/kallsyms.c for a more complete description)
59911 */
59912+#ifdef CONFIG_GRKERNSEC_HIDESYM
59913+#define __INCLUDED_BY_HIDESYM 1
59914+#endif
59915 #include <linux/kallsyms.h>
59916 #include <linux/module.h>
59917 #include <linux/init.h>
59918@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark
59919
59920 static inline int is_kernel_inittext(unsigned long addr)
59921 {
59922+ if (system_state != SYSTEM_BOOTING)
59923+ return 0;
59924+
59925 if (addr >= (unsigned long)_sinittext
59926 && addr <= (unsigned long)_einittext)
59927 return 1;
59928 return 0;
59929 }
59930
59931+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59932+#ifdef CONFIG_MODULES
59933+static inline int is_module_text(unsigned long addr)
59934+{
59935+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
59936+ return 1;
59937+
59938+ addr = ktla_ktva(addr);
59939+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
59940+}
59941+#else
59942+static inline int is_module_text(unsigned long addr)
59943+{
59944+ return 0;
59945+}
59946+#endif
59947+#endif
59948+
59949 static inline int is_kernel_text(unsigned long addr)
59950 {
59951 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
59952@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne
59953
59954 static inline int is_kernel(unsigned long addr)
59955 {
59956+
59957+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59958+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
59959+ return 1;
59960+
59961+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
59962+#else
59963 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
59964+#endif
59965+
59966 return 1;
59967 return in_gate_area_no_mm(addr);
59968 }
59969
59970 static int is_ksym_addr(unsigned long addr)
59971 {
59972+
59973+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59974+ if (is_module_text(addr))
59975+ return 0;
59976+#endif
59977+
59978 if (all_var)
59979 return is_kernel(addr);
59980
59981@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(st
59982
59983 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
59984 {
59985- iter->name[0] = '\0';
59986 iter->nameoff = get_symbol_offset(new_pos);
59987 iter->pos = new_pos;
59988 }
59989@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, vo
59990 {
59991 struct kallsym_iter *iter = m->private;
59992
59993+#ifdef CONFIG_GRKERNSEC_HIDESYM
59994+ if (current_uid())
59995+ return 0;
59996+#endif
59997+
59998 /* Some debugging symbols have no name. Ignore them. */
59999 if (!iter->name[0])
60000 return 0;
60001@@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *i
60002 struct kallsym_iter *iter;
60003 int ret;
60004
60005- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
60006+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
60007 if (!iter)
60008 return -ENOMEM;
60009 reset_iter(iter, 0);
60010diff -urNp linux-3.0.4/kernel/kmod.c linux-3.0.4/kernel/kmod.c
60011--- linux-3.0.4/kernel/kmod.c 2011-07-21 22:17:23.000000000 -0400
60012+++ linux-3.0.4/kernel/kmod.c 2011-08-23 21:48:14.000000000 -0400
60013@@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
60014 * If module auto-loading support is disabled then this function
60015 * becomes a no-operation.
60016 */
60017-int __request_module(bool wait, const char *fmt, ...)
60018+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
60019 {
60020- va_list args;
60021 char module_name[MODULE_NAME_LEN];
60022 unsigned int max_modprobes;
60023 int ret;
60024- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
60025+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
60026 static char *envp[] = { "HOME=/",
60027 "TERM=linux",
60028 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
60029@@ -88,9 +87,7 @@ int __request_module(bool wait, const ch
60030 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
60031 static int kmod_loop_msg;
60032
60033- va_start(args, fmt);
60034- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
60035- va_end(args);
60036+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
60037 if (ret >= MODULE_NAME_LEN)
60038 return -ENAMETOOLONG;
60039
60040@@ -98,6 +95,20 @@ int __request_module(bool wait, const ch
60041 if (ret)
60042 return ret;
60043
60044+#ifdef CONFIG_GRKERNSEC_MODHARDEN
60045+ if (!current_uid()) {
60046+ /* hack to workaround consolekit/udisks stupidity */
60047+ read_lock(&tasklist_lock);
60048+ if (!strcmp(current->comm, "mount") &&
60049+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
60050+ read_unlock(&tasklist_lock);
60051+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
60052+ return -EPERM;
60053+ }
60054+ read_unlock(&tasklist_lock);
60055+ }
60056+#endif
60057+
60058 /* If modprobe needs a service that is in a module, we get a recursive
60059 * loop. Limit the number of running kmod threads to max_threads/2 or
60060 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
60061@@ -131,6 +142,47 @@ int __request_module(bool wait, const ch
60062 atomic_dec(&kmod_concurrent);
60063 return ret;
60064 }
60065+
60066+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
60067+{
60068+ va_list args;
60069+ int ret;
60070+
60071+ va_start(args, fmt);
60072+ ret = ____request_module(wait, module_param, fmt, args);
60073+ va_end(args);
60074+
60075+ return ret;
60076+}
60077+
60078+int __request_module(bool wait, const char *fmt, ...)
60079+{
60080+ va_list args;
60081+ int ret;
60082+
60083+#ifdef CONFIG_GRKERNSEC_MODHARDEN
60084+ if (current_uid()) {
60085+ char module_param[MODULE_NAME_LEN];
60086+
60087+ memset(module_param, 0, sizeof(module_param));
60088+
60089+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
60090+
60091+ va_start(args, fmt);
60092+ ret = ____request_module(wait, module_param, fmt, args);
60093+ va_end(args);
60094+
60095+ return ret;
60096+ }
60097+#endif
60098+
60099+ va_start(args, fmt);
60100+ ret = ____request_module(wait, NULL, fmt, args);
60101+ va_end(args);
60102+
60103+ return ret;
60104+}
60105+
60106 EXPORT_SYMBOL(__request_module);
60107 #endif /* CONFIG_MODULES */
60108
60109diff -urNp linux-3.0.4/kernel/kprobes.c linux-3.0.4/kernel/kprobes.c
60110--- linux-3.0.4/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
60111+++ linux-3.0.4/kernel/kprobes.c 2011-08-23 21:47:56.000000000 -0400
60112@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_
60113 * kernel image and loaded module images reside. This is required
60114 * so x86_64 can correctly handle the %rip-relative fixups.
60115 */
60116- kip->insns = module_alloc(PAGE_SIZE);
60117+ kip->insns = module_alloc_exec(PAGE_SIZE);
60118 if (!kip->insns) {
60119 kfree(kip);
60120 return NULL;
60121@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(st
60122 */
60123 if (!list_is_singular(&kip->list)) {
60124 list_del(&kip->list);
60125- module_free(NULL, kip->insns);
60126+ module_free_exec(NULL, kip->insns);
60127 kfree(kip);
60128 }
60129 return 1;
60130@@ -1936,7 +1936,7 @@ static int __init init_kprobes(void)
60131 {
60132 int i, err = 0;
60133 unsigned long offset = 0, size = 0;
60134- char *modname, namebuf[128];
60135+ char *modname, namebuf[KSYM_NAME_LEN];
60136 const char *symbol_name;
60137 void *addr;
60138 struct kprobe_blackpoint *kb;
60139@@ -2062,7 +2062,7 @@ static int __kprobes show_kprobe_addr(st
60140 const char *sym = NULL;
60141 unsigned int i = *(loff_t *) v;
60142 unsigned long offset = 0;
60143- char *modname, namebuf[128];
60144+ char *modname, namebuf[KSYM_NAME_LEN];
60145
60146 head = &kprobe_table[i];
60147 preempt_disable();
60148diff -urNp linux-3.0.4/kernel/lockdep.c linux-3.0.4/kernel/lockdep.c
60149--- linux-3.0.4/kernel/lockdep.c 2011-07-21 22:17:23.000000000 -0400
60150+++ linux-3.0.4/kernel/lockdep.c 2011-08-23 21:47:56.000000000 -0400
60151@@ -583,6 +583,10 @@ static int static_obj(void *obj)
60152 end = (unsigned long) &_end,
60153 addr = (unsigned long) obj;
60154
60155+#ifdef CONFIG_PAX_KERNEXEC
60156+ start = ktla_ktva(start);
60157+#endif
60158+
60159 /*
60160 * static variable?
60161 */
60162@@ -718,6 +722,7 @@ register_lock_class(struct lockdep_map *
60163 if (!static_obj(lock->key)) {
60164 debug_locks_off();
60165 printk("INFO: trying to register non-static key.\n");
60166+ printk("lock:%pS key:%pS.\n", lock, lock->key);
60167 printk("the code is fine but needs lockdep annotation.\n");
60168 printk("turning off the locking correctness validator.\n");
60169 dump_stack();
60170@@ -2936,7 +2941,7 @@ static int __lock_acquire(struct lockdep
60171 if (!class)
60172 return 0;
60173 }
60174- atomic_inc((atomic_t *)&class->ops);
60175+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
60176 if (very_verbose(class)) {
60177 printk("\nacquire class [%p] %s", class->key, class->name);
60178 if (class->name_version > 1)
60179diff -urNp linux-3.0.4/kernel/lockdep_proc.c linux-3.0.4/kernel/lockdep_proc.c
60180--- linux-3.0.4/kernel/lockdep_proc.c 2011-07-21 22:17:23.000000000 -0400
60181+++ linux-3.0.4/kernel/lockdep_proc.c 2011-08-23 21:47:56.000000000 -0400
60182@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
60183
60184 static void print_name(struct seq_file *m, struct lock_class *class)
60185 {
60186- char str[128];
60187+ char str[KSYM_NAME_LEN];
60188 const char *name = class->name;
60189
60190 if (!name) {
60191diff -urNp linux-3.0.4/kernel/module.c linux-3.0.4/kernel/module.c
60192--- linux-3.0.4/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
60193+++ linux-3.0.4/kernel/module.c 2011-08-23 21:48:14.000000000 -0400
60194@@ -58,6 +58,7 @@
60195 #include <linux/jump_label.h>
60196 #include <linux/pfn.h>
60197 #include <linux/bsearch.h>
60198+#include <linux/grsecurity.h>
60199
60200 #define CREATE_TRACE_POINTS
60201 #include <trace/events/module.h>
60202@@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
60203
60204 /* Bounds of module allocation, for speeding __module_address.
60205 * Protected by module_mutex. */
60206-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
60207+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
60208+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
60209
60210 int register_module_notifier(struct notifier_block * nb)
60211 {
60212@@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(cons
60213 return true;
60214
60215 list_for_each_entry_rcu(mod, &modules, list) {
60216- struct symsearch arr[] = {
60217+ struct symsearch modarr[] = {
60218 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
60219 NOT_GPL_ONLY, false },
60220 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
60221@@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(cons
60222 #endif
60223 };
60224
60225- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
60226+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
60227 return true;
60228 }
60229 return false;
60230@@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(
60231 static int percpu_modalloc(struct module *mod,
60232 unsigned long size, unsigned long align)
60233 {
60234- if (align > PAGE_SIZE) {
60235+ if (align-1 >= PAGE_SIZE) {
60236 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
60237 mod->name, align, PAGE_SIZE);
60238 align = PAGE_SIZE;
60239@@ -1166,7 +1168,7 @@ resolve_symbol_wait(struct module *mod,
60240 */
60241 #ifdef CONFIG_SYSFS
60242
60243-#ifdef CONFIG_KALLSYMS
60244+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
60245 static inline bool sect_empty(const Elf_Shdr *sect)
60246 {
60247 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
60248@@ -1632,21 +1634,21 @@ static void set_section_ro_nx(void *base
60249
60250 static void unset_module_core_ro_nx(struct module *mod)
60251 {
60252- set_page_attributes(mod->module_core + mod->core_text_size,
60253- mod->module_core + mod->core_size,
60254+ set_page_attributes(mod->module_core_rw,
60255+ mod->module_core_rw + mod->core_size_rw,
60256 set_memory_x);
60257- set_page_attributes(mod->module_core,
60258- mod->module_core + mod->core_ro_size,
60259+ set_page_attributes(mod->module_core_rx,
60260+ mod->module_core_rx + mod->core_size_rx,
60261 set_memory_rw);
60262 }
60263
60264 static void unset_module_init_ro_nx(struct module *mod)
60265 {
60266- set_page_attributes(mod->module_init + mod->init_text_size,
60267- mod->module_init + mod->init_size,
60268+ set_page_attributes(mod->module_init_rw,
60269+ mod->module_init_rw + mod->init_size_rw,
60270 set_memory_x);
60271- set_page_attributes(mod->module_init,
60272- mod->module_init + mod->init_ro_size,
60273+ set_page_attributes(mod->module_init_rx,
60274+ mod->module_init_rx + mod->init_size_rx,
60275 set_memory_rw);
60276 }
60277
60278@@ -1657,14 +1659,14 @@ void set_all_modules_text_rw(void)
60279
60280 mutex_lock(&module_mutex);
60281 list_for_each_entry_rcu(mod, &modules, list) {
60282- if ((mod->module_core) && (mod->core_text_size)) {
60283- set_page_attributes(mod->module_core,
60284- mod->module_core + mod->core_text_size,
60285+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
60286+ set_page_attributes(mod->module_core_rx,
60287+ mod->module_core_rx + mod->core_size_rx,
60288 set_memory_rw);
60289 }
60290- if ((mod->module_init) && (mod->init_text_size)) {
60291- set_page_attributes(mod->module_init,
60292- mod->module_init + mod->init_text_size,
60293+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
60294+ set_page_attributes(mod->module_init_rx,
60295+ mod->module_init_rx + mod->init_size_rx,
60296 set_memory_rw);
60297 }
60298 }
60299@@ -1678,14 +1680,14 @@ void set_all_modules_text_ro(void)
60300
60301 mutex_lock(&module_mutex);
60302 list_for_each_entry_rcu(mod, &modules, list) {
60303- if ((mod->module_core) && (mod->core_text_size)) {
60304- set_page_attributes(mod->module_core,
60305- mod->module_core + mod->core_text_size,
60306+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
60307+ set_page_attributes(mod->module_core_rx,
60308+ mod->module_core_rx + mod->core_size_rx,
60309 set_memory_ro);
60310 }
60311- if ((mod->module_init) && (mod->init_text_size)) {
60312- set_page_attributes(mod->module_init,
60313- mod->module_init + mod->init_text_size,
60314+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
60315+ set_page_attributes(mod->module_init_rx,
60316+ mod->module_init_rx + mod->init_size_rx,
60317 set_memory_ro);
60318 }
60319 }
60320@@ -1722,16 +1724,19 @@ static void free_module(struct module *m
60321
60322 /* This may be NULL, but that's OK */
60323 unset_module_init_ro_nx(mod);
60324- module_free(mod, mod->module_init);
60325+ module_free(mod, mod->module_init_rw);
60326+ module_free_exec(mod, mod->module_init_rx);
60327 kfree(mod->args);
60328 percpu_modfree(mod);
60329
60330 /* Free lock-classes: */
60331- lockdep_free_key_range(mod->module_core, mod->core_size);
60332+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
60333+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
60334
60335 /* Finally, free the core (containing the module structure) */
60336 unset_module_core_ro_nx(mod);
60337- module_free(mod, mod->module_core);
60338+ module_free_exec(mod, mod->module_core_rx);
60339+ module_free(mod, mod->module_core_rw);
60340
60341 #ifdef CONFIG_MPU
60342 update_protections(current->mm);
60343@@ -1800,10 +1805,31 @@ static int simplify_symbols(struct modul
60344 unsigned int i;
60345 int ret = 0;
60346 const struct kernel_symbol *ksym;
60347+#ifdef CONFIG_GRKERNSEC_MODHARDEN
60348+ int is_fs_load = 0;
60349+ int register_filesystem_found = 0;
60350+ char *p;
60351+
60352+ p = strstr(mod->args, "grsec_modharden_fs");
60353+ if (p) {
60354+ char *endptr = p + strlen("grsec_modharden_fs");
60355+ /* copy \0 as well */
60356+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
60357+ is_fs_load = 1;
60358+ }
60359+#endif
60360
60361 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
60362 const char *name = info->strtab + sym[i].st_name;
60363
60364+#ifdef CONFIG_GRKERNSEC_MODHARDEN
60365+ /* it's a real shame this will never get ripped and copied
60366+ upstream! ;(
60367+ */
60368+ if (is_fs_load && !strcmp(name, "register_filesystem"))
60369+ register_filesystem_found = 1;
60370+#endif
60371+
60372 switch (sym[i].st_shndx) {
60373 case SHN_COMMON:
60374 /* We compiled with -fno-common. These are not
60375@@ -1824,7 +1850,9 @@ static int simplify_symbols(struct modul
60376 ksym = resolve_symbol_wait(mod, info, name);
60377 /* Ok if resolved. */
60378 if (ksym && !IS_ERR(ksym)) {
60379+ pax_open_kernel();
60380 sym[i].st_value = ksym->value;
60381+ pax_close_kernel();
60382 break;
60383 }
60384
60385@@ -1843,11 +1871,20 @@ static int simplify_symbols(struct modul
60386 secbase = (unsigned long)mod_percpu(mod);
60387 else
60388 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
60389+ pax_open_kernel();
60390 sym[i].st_value += secbase;
60391+ pax_close_kernel();
60392 break;
60393 }
60394 }
60395
60396+#ifdef CONFIG_GRKERNSEC_MODHARDEN
60397+ if (is_fs_load && !register_filesystem_found) {
60398+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
60399+ ret = -EPERM;
60400+ }
60401+#endif
60402+
60403 return ret;
60404 }
60405
60406@@ -1931,22 +1968,12 @@ static void layout_sections(struct modul
60407 || s->sh_entsize != ~0UL
60408 || strstarts(sname, ".init"))
60409 continue;
60410- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
60411+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
60412+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
60413+ else
60414+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
60415 DEBUGP("\t%s\n", name);
60416 }
60417- switch (m) {
60418- case 0: /* executable */
60419- mod->core_size = debug_align(mod->core_size);
60420- mod->core_text_size = mod->core_size;
60421- break;
60422- case 1: /* RO: text and ro-data */
60423- mod->core_size = debug_align(mod->core_size);
60424- mod->core_ro_size = mod->core_size;
60425- break;
60426- case 3: /* whole core */
60427- mod->core_size = debug_align(mod->core_size);
60428- break;
60429- }
60430 }
60431
60432 DEBUGP("Init section allocation order:\n");
60433@@ -1960,23 +1987,13 @@ static void layout_sections(struct modul
60434 || s->sh_entsize != ~0UL
60435 || !strstarts(sname, ".init"))
60436 continue;
60437- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
60438- | INIT_OFFSET_MASK);
60439+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
60440+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
60441+ else
60442+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
60443+ s->sh_entsize |= INIT_OFFSET_MASK;
60444 DEBUGP("\t%s\n", sname);
60445 }
60446- switch (m) {
60447- case 0: /* executable */
60448- mod->init_size = debug_align(mod->init_size);
60449- mod->init_text_size = mod->init_size;
60450- break;
60451- case 1: /* RO: text and ro-data */
60452- mod->init_size = debug_align(mod->init_size);
60453- mod->init_ro_size = mod->init_size;
60454- break;
60455- case 3: /* whole init */
60456- mod->init_size = debug_align(mod->init_size);
60457- break;
60458- }
60459 }
60460 }
60461
60462@@ -2141,7 +2158,7 @@ static void layout_symtab(struct module
60463
60464 /* Put symbol section at end of init part of module. */
60465 symsect->sh_flags |= SHF_ALLOC;
60466- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
60467+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
60468 info->index.sym) | INIT_OFFSET_MASK;
60469 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
60470
60471@@ -2158,19 +2175,19 @@ static void layout_symtab(struct module
60472 }
60473
60474 /* Append room for core symbols at end of core part. */
60475- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
60476- mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
60477+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
60478+ mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
60479
60480 /* Put string table section at end of init part of module. */
60481 strsect->sh_flags |= SHF_ALLOC;
60482- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
60483+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
60484 info->index.str) | INIT_OFFSET_MASK;
60485 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
60486
60487 /* Append room for core symbols' strings at end of core part. */
60488- info->stroffs = mod->core_size;
60489+ info->stroffs = mod->core_size_rx;
60490 __set_bit(0, info->strmap);
60491- mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
60492+ mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
60493 }
60494
60495 static void add_kallsyms(struct module *mod, const struct load_info *info)
60496@@ -2186,11 +2203,13 @@ static void add_kallsyms(struct module *
60497 /* Make sure we get permanent strtab: don't use info->strtab. */
60498 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
60499
60500+ pax_open_kernel();
60501+
60502 /* Set types up while we still have access to sections. */
60503 for (i = 0; i < mod->num_symtab; i++)
60504 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
60505
60506- mod->core_symtab = dst = mod->module_core + info->symoffs;
60507+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
60508 src = mod->symtab;
60509 *dst = *src;
60510 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
60511@@ -2203,10 +2222,12 @@ static void add_kallsyms(struct module *
60512 }
60513 mod->core_num_syms = ndst;
60514
60515- mod->core_strtab = s = mod->module_core + info->stroffs;
60516+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
60517 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
60518 if (test_bit(i, info->strmap))
60519 *++s = mod->strtab[i];
60520+
60521+ pax_close_kernel();
60522 }
60523 #else
60524 static inline void layout_symtab(struct module *mod, struct load_info *info)
60525@@ -2235,17 +2256,33 @@ static void dynamic_debug_remove(struct
60526 ddebug_remove_module(debug->modname);
60527 }
60528
60529-static void *module_alloc_update_bounds(unsigned long size)
60530+static void *module_alloc_update_bounds_rw(unsigned long size)
60531 {
60532 void *ret = module_alloc(size);
60533
60534 if (ret) {
60535 mutex_lock(&module_mutex);
60536 /* Update module bounds. */
60537- if ((unsigned long)ret < module_addr_min)
60538- module_addr_min = (unsigned long)ret;
60539- if ((unsigned long)ret + size > module_addr_max)
60540- module_addr_max = (unsigned long)ret + size;
60541+ if ((unsigned long)ret < module_addr_min_rw)
60542+ module_addr_min_rw = (unsigned long)ret;
60543+ if ((unsigned long)ret + size > module_addr_max_rw)
60544+ module_addr_max_rw = (unsigned long)ret + size;
60545+ mutex_unlock(&module_mutex);
60546+ }
60547+ return ret;
60548+}
60549+
60550+static void *module_alloc_update_bounds_rx(unsigned long size)
60551+{
60552+ void *ret = module_alloc_exec(size);
60553+
60554+ if (ret) {
60555+ mutex_lock(&module_mutex);
60556+ /* Update module bounds. */
60557+ if ((unsigned long)ret < module_addr_min_rx)
60558+ module_addr_min_rx = (unsigned long)ret;
60559+ if ((unsigned long)ret + size > module_addr_max_rx)
60560+ module_addr_max_rx = (unsigned long)ret + size;
60561 mutex_unlock(&module_mutex);
60562 }
60563 return ret;
60564@@ -2538,7 +2575,7 @@ static int move_module(struct module *mo
60565 void *ptr;
60566
60567 /* Do the allocs. */
60568- ptr = module_alloc_update_bounds(mod->core_size);
60569+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
60570 /*
60571 * The pointer to this block is stored in the module structure
60572 * which is inside the block. Just mark it as not being a
60573@@ -2548,23 +2585,50 @@ static int move_module(struct module *mo
60574 if (!ptr)
60575 return -ENOMEM;
60576
60577- memset(ptr, 0, mod->core_size);
60578- mod->module_core = ptr;
60579+ memset(ptr, 0, mod->core_size_rw);
60580+ mod->module_core_rw = ptr;
60581
60582- ptr = module_alloc_update_bounds(mod->init_size);
60583+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
60584 /*
60585 * The pointer to this block is stored in the module structure
60586 * which is inside the block. This block doesn't need to be
60587 * scanned as it contains data and code that will be freed
60588 * after the module is initialized.
60589 */
60590- kmemleak_ignore(ptr);
60591- if (!ptr && mod->init_size) {
60592- module_free(mod, mod->module_core);
60593+ kmemleak_not_leak(ptr);
60594+ if (!ptr && mod->init_size_rw) {
60595+ module_free(mod, mod->module_core_rw);
60596 return -ENOMEM;
60597 }
60598- memset(ptr, 0, mod->init_size);
60599- mod->module_init = ptr;
60600+ memset(ptr, 0, mod->init_size_rw);
60601+ mod->module_init_rw = ptr;
60602+
60603+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
60604+ kmemleak_not_leak(ptr);
60605+ if (!ptr) {
60606+ module_free(mod, mod->module_init_rw);
60607+ module_free(mod, mod->module_core_rw);
60608+ return -ENOMEM;
60609+ }
60610+
60611+ pax_open_kernel();
60612+ memset(ptr, 0, mod->core_size_rx);
60613+ pax_close_kernel();
60614+ mod->module_core_rx = ptr;
60615+
60616+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
60617+ kmemleak_not_leak(ptr);
60618+ if (!ptr && mod->init_size_rx) {
60619+ module_free_exec(mod, mod->module_core_rx);
60620+ module_free(mod, mod->module_init_rw);
60621+ module_free(mod, mod->module_core_rw);
60622+ return -ENOMEM;
60623+ }
60624+
60625+ pax_open_kernel();
60626+ memset(ptr, 0, mod->init_size_rx);
60627+ pax_close_kernel();
60628+ mod->module_init_rx = ptr;
60629
60630 /* Transfer each section which specifies SHF_ALLOC */
60631 DEBUGP("final section addresses:\n");
60632@@ -2575,16 +2639,45 @@ static int move_module(struct module *mo
60633 if (!(shdr->sh_flags & SHF_ALLOC))
60634 continue;
60635
60636- if (shdr->sh_entsize & INIT_OFFSET_MASK)
60637- dest = mod->module_init
60638- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
60639- else
60640- dest = mod->module_core + shdr->sh_entsize;
60641+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
60642+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
60643+ dest = mod->module_init_rw
60644+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
60645+ else
60646+ dest = mod->module_init_rx
60647+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
60648+ } else {
60649+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
60650+ dest = mod->module_core_rw + shdr->sh_entsize;
60651+ else
60652+ dest = mod->module_core_rx + shdr->sh_entsize;
60653+ }
60654+
60655+ if (shdr->sh_type != SHT_NOBITS) {
60656+
60657+#ifdef CONFIG_PAX_KERNEXEC
60658+#ifdef CONFIG_X86_64
60659+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
60660+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
60661+#endif
60662+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
60663+ pax_open_kernel();
60664+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
60665+ pax_close_kernel();
60666+ } else
60667+#endif
60668
60669- if (shdr->sh_type != SHT_NOBITS)
60670 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
60671+ }
60672 /* Update sh_addr to point to copy in image. */
60673- shdr->sh_addr = (unsigned long)dest;
60674+
60675+#ifdef CONFIG_PAX_KERNEXEC
60676+ if (shdr->sh_flags & SHF_EXECINSTR)
60677+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
60678+ else
60679+#endif
60680+
60681+ shdr->sh_addr = (unsigned long)dest;
60682 DEBUGP("\t0x%lx %s\n",
60683 shdr->sh_addr, info->secstrings + shdr->sh_name);
60684 }
60685@@ -2635,12 +2728,12 @@ static void flush_module_icache(const st
60686 * Do it before processing of module parameters, so the module
60687 * can provide parameter accessor functions of its own.
60688 */
60689- if (mod->module_init)
60690- flush_icache_range((unsigned long)mod->module_init,
60691- (unsigned long)mod->module_init
60692- + mod->init_size);
60693- flush_icache_range((unsigned long)mod->module_core,
60694- (unsigned long)mod->module_core + mod->core_size);
60695+ if (mod->module_init_rx)
60696+ flush_icache_range((unsigned long)mod->module_init_rx,
60697+ (unsigned long)mod->module_init_rx
60698+ + mod->init_size_rx);
60699+ flush_icache_range((unsigned long)mod->module_core_rx,
60700+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
60701
60702 set_fs(old_fs);
60703 }
60704@@ -2712,8 +2805,10 @@ static void module_deallocate(struct mod
60705 {
60706 kfree(info->strmap);
60707 percpu_modfree(mod);
60708- module_free(mod, mod->module_init);
60709- module_free(mod, mod->module_core);
60710+ module_free_exec(mod, mod->module_init_rx);
60711+ module_free_exec(mod, mod->module_core_rx);
60712+ module_free(mod, mod->module_init_rw);
60713+ module_free(mod, mod->module_core_rw);
60714 }
60715
60716 static int post_relocation(struct module *mod, const struct load_info *info)
60717@@ -2770,9 +2865,38 @@ static struct module *load_module(void _
60718 if (err)
60719 goto free_unload;
60720
60721+ /* Now copy in args */
60722+ mod->args = strndup_user(uargs, ~0UL >> 1);
60723+ if (IS_ERR(mod->args)) {
60724+ err = PTR_ERR(mod->args);
60725+ goto free_unload;
60726+ }
60727+
60728 /* Set up MODINFO_ATTR fields */
60729 setup_modinfo(mod, &info);
60730
60731+#ifdef CONFIG_GRKERNSEC_MODHARDEN
60732+ {
60733+ char *p, *p2;
60734+
60735+ if (strstr(mod->args, "grsec_modharden_netdev")) {
60736+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
60737+ err = -EPERM;
60738+ goto free_modinfo;
60739+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
60740+ p += strlen("grsec_modharden_normal");
60741+ p2 = strstr(p, "_");
60742+ if (p2) {
60743+ *p2 = '\0';
60744+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
60745+ *p2 = '_';
60746+ }
60747+ err = -EPERM;
60748+ goto free_modinfo;
60749+ }
60750+ }
60751+#endif
60752+
60753 /* Fix up syms, so that st_value is a pointer to location. */
60754 err = simplify_symbols(mod, &info);
60755 if (err < 0)
60756@@ -2788,13 +2912,6 @@ static struct module *load_module(void _
60757
60758 flush_module_icache(mod);
60759
60760- /* Now copy in args */
60761- mod->args = strndup_user(uargs, ~0UL >> 1);
60762- if (IS_ERR(mod->args)) {
60763- err = PTR_ERR(mod->args);
60764- goto free_arch_cleanup;
60765- }
60766-
60767 /* Mark state as coming so strong_try_module_get() ignores us. */
60768 mod->state = MODULE_STATE_COMING;
60769
60770@@ -2854,11 +2971,10 @@ static struct module *load_module(void _
60771 unlock:
60772 mutex_unlock(&module_mutex);
60773 synchronize_sched();
60774- kfree(mod->args);
60775- free_arch_cleanup:
60776 module_arch_cleanup(mod);
60777 free_modinfo:
60778 free_modinfo(mod);
60779+ kfree(mod->args);
60780 free_unload:
60781 module_unload_free(mod);
60782 free_module:
60783@@ -2899,16 +3015,16 @@ SYSCALL_DEFINE3(init_module, void __user
60784 MODULE_STATE_COMING, mod);
60785
60786 /* Set RO and NX regions for core */
60787- set_section_ro_nx(mod->module_core,
60788- mod->core_text_size,
60789- mod->core_ro_size,
60790- mod->core_size);
60791+ set_section_ro_nx(mod->module_core_rx,
60792+ mod->core_size_rx,
60793+ mod->core_size_rx,
60794+ mod->core_size_rx);
60795
60796 /* Set RO and NX regions for init */
60797- set_section_ro_nx(mod->module_init,
60798- mod->init_text_size,
60799- mod->init_ro_size,
60800- mod->init_size);
60801+ set_section_ro_nx(mod->module_init_rx,
60802+ mod->init_size_rx,
60803+ mod->init_size_rx,
60804+ mod->init_size_rx);
60805
60806 do_mod_ctors(mod);
60807 /* Start the module */
60808@@ -2954,11 +3070,12 @@ SYSCALL_DEFINE3(init_module, void __user
60809 mod->strtab = mod->core_strtab;
60810 #endif
60811 unset_module_init_ro_nx(mod);
60812- module_free(mod, mod->module_init);
60813- mod->module_init = NULL;
60814- mod->init_size = 0;
60815- mod->init_ro_size = 0;
60816- mod->init_text_size = 0;
60817+ module_free(mod, mod->module_init_rw);
60818+ module_free_exec(mod, mod->module_init_rx);
60819+ mod->module_init_rw = NULL;
60820+ mod->module_init_rx = NULL;
60821+ mod->init_size_rw = 0;
60822+ mod->init_size_rx = 0;
60823 mutex_unlock(&module_mutex);
60824
60825 return 0;
60826@@ -2989,10 +3106,16 @@ static const char *get_ksymbol(struct mo
60827 unsigned long nextval;
60828
60829 /* At worse, next value is at end of module */
60830- if (within_module_init(addr, mod))
60831- nextval = (unsigned long)mod->module_init+mod->init_text_size;
60832+ if (within_module_init_rx(addr, mod))
60833+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
60834+ else if (within_module_init_rw(addr, mod))
60835+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
60836+ else if (within_module_core_rx(addr, mod))
60837+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
60838+ else if (within_module_core_rw(addr, mod))
60839+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
60840 else
60841- nextval = (unsigned long)mod->module_core+mod->core_text_size;
60842+ return NULL;
60843
60844 /* Scan for closest preceding symbol, and next symbol. (ELF
60845 starts real symbols at 1). */
60846@@ -3238,7 +3361,7 @@ static int m_show(struct seq_file *m, vo
60847 char buf[8];
60848
60849 seq_printf(m, "%s %u",
60850- mod->name, mod->init_size + mod->core_size);
60851+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
60852 print_unload_info(m, mod);
60853
60854 /* Informative for users. */
60855@@ -3247,7 +3370,7 @@ static int m_show(struct seq_file *m, vo
60856 mod->state == MODULE_STATE_COMING ? "Loading":
60857 "Live");
60858 /* Used by oprofile and other similar tools. */
60859- seq_printf(m, " 0x%pK", mod->module_core);
60860+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
60861
60862 /* Taints info */
60863 if (mod->taints)
60864@@ -3283,7 +3406,17 @@ static const struct file_operations proc
60865
60866 static int __init proc_modules_init(void)
60867 {
60868+#ifndef CONFIG_GRKERNSEC_HIDESYM
60869+#ifdef CONFIG_GRKERNSEC_PROC_USER
60870+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
60871+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60872+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
60873+#else
60874 proc_create("modules", 0, NULL, &proc_modules_operations);
60875+#endif
60876+#else
60877+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
60878+#endif
60879 return 0;
60880 }
60881 module_init(proc_modules_init);
60882@@ -3342,12 +3475,12 @@ struct module *__module_address(unsigned
60883 {
60884 struct module *mod;
60885
60886- if (addr < module_addr_min || addr > module_addr_max)
60887+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
60888+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
60889 return NULL;
60890
60891 list_for_each_entry_rcu(mod, &modules, list)
60892- if (within_module_core(addr, mod)
60893- || within_module_init(addr, mod))
60894+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
60895 return mod;
60896 return NULL;
60897 }
60898@@ -3381,11 +3514,20 @@ bool is_module_text_address(unsigned lon
60899 */
60900 struct module *__module_text_address(unsigned long addr)
60901 {
60902- struct module *mod = __module_address(addr);
60903+ struct module *mod;
60904+
60905+#ifdef CONFIG_X86_32
60906+ addr = ktla_ktva(addr);
60907+#endif
60908+
60909+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
60910+ return NULL;
60911+
60912+ mod = __module_address(addr);
60913+
60914 if (mod) {
60915 /* Make sure it's within the text section. */
60916- if (!within(addr, mod->module_init, mod->init_text_size)
60917- && !within(addr, mod->module_core, mod->core_text_size))
60918+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
60919 mod = NULL;
60920 }
60921 return mod;
60922diff -urNp linux-3.0.4/kernel/mutex.c linux-3.0.4/kernel/mutex.c
60923--- linux-3.0.4/kernel/mutex.c 2011-07-21 22:17:23.000000000 -0400
60924+++ linux-3.0.4/kernel/mutex.c 2011-08-23 21:47:56.000000000 -0400
60925@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock,
60926 spin_lock_mutex(&lock->wait_lock, flags);
60927
60928 debug_mutex_lock_common(lock, &waiter);
60929- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
60930+ debug_mutex_add_waiter(lock, &waiter, task);
60931
60932 /* add waiting tasks to the end of the waitqueue (FIFO): */
60933 list_add_tail(&waiter.list, &lock->wait_list);
60934@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock,
60935 * TASK_UNINTERRUPTIBLE case.)
60936 */
60937 if (unlikely(signal_pending_state(state, task))) {
60938- mutex_remove_waiter(lock, &waiter,
60939- task_thread_info(task));
60940+ mutex_remove_waiter(lock, &waiter, task);
60941 mutex_release(&lock->dep_map, 1, ip);
60942 spin_unlock_mutex(&lock->wait_lock, flags);
60943
60944@@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock,
60945 done:
60946 lock_acquired(&lock->dep_map, ip);
60947 /* got the lock - rejoice! */
60948- mutex_remove_waiter(lock, &waiter, current_thread_info());
60949+ mutex_remove_waiter(lock, &waiter, task);
60950 mutex_set_owner(lock);
60951
60952 /* set it to 0 if there are no waiters left: */
60953diff -urNp linux-3.0.4/kernel/mutex-debug.c linux-3.0.4/kernel/mutex-debug.c
60954--- linux-3.0.4/kernel/mutex-debug.c 2011-07-21 22:17:23.000000000 -0400
60955+++ linux-3.0.4/kernel/mutex-debug.c 2011-08-23 21:47:56.000000000 -0400
60956@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
60957 }
60958
60959 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
60960- struct thread_info *ti)
60961+ struct task_struct *task)
60962 {
60963 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
60964
60965 /* Mark the current thread as blocked on the lock: */
60966- ti->task->blocked_on = waiter;
60967+ task->blocked_on = waiter;
60968 }
60969
60970 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
60971- struct thread_info *ti)
60972+ struct task_struct *task)
60973 {
60974 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
60975- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
60976- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
60977- ti->task->blocked_on = NULL;
60978+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
60979+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
60980+ task->blocked_on = NULL;
60981
60982 list_del_init(&waiter->list);
60983 waiter->task = NULL;
60984diff -urNp linux-3.0.4/kernel/mutex-debug.h linux-3.0.4/kernel/mutex-debug.h
60985--- linux-3.0.4/kernel/mutex-debug.h 2011-07-21 22:17:23.000000000 -0400
60986+++ linux-3.0.4/kernel/mutex-debug.h 2011-08-23 21:47:56.000000000 -0400
60987@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(stru
60988 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
60989 extern void debug_mutex_add_waiter(struct mutex *lock,
60990 struct mutex_waiter *waiter,
60991- struct thread_info *ti);
60992+ struct task_struct *task);
60993 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
60994- struct thread_info *ti);
60995+ struct task_struct *task);
60996 extern void debug_mutex_unlock(struct mutex *lock);
60997 extern void debug_mutex_init(struct mutex *lock, const char *name,
60998 struct lock_class_key *key);
60999diff -urNp linux-3.0.4/kernel/padata.c linux-3.0.4/kernel/padata.c
61000--- linux-3.0.4/kernel/padata.c 2011-07-21 22:17:23.000000000 -0400
61001+++ linux-3.0.4/kernel/padata.c 2011-08-23 21:47:56.000000000 -0400
61002@@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_ins
61003 padata->pd = pd;
61004 padata->cb_cpu = cb_cpu;
61005
61006- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
61007- atomic_set(&pd->seq_nr, -1);
61008+ if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
61009+ atomic_set_unchecked(&pd->seq_nr, -1);
61010
61011- padata->seq_nr = atomic_inc_return(&pd->seq_nr);
61012+ padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
61013
61014 target_cpu = padata_cpu_hash(padata);
61015 queue = per_cpu_ptr(pd->pqueue, target_cpu);
61016@@ -444,7 +444,7 @@ static struct parallel_data *padata_allo
61017 padata_init_pqueues(pd);
61018 padata_init_squeues(pd);
61019 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
61020- atomic_set(&pd->seq_nr, -1);
61021+ atomic_set_unchecked(&pd->seq_nr, -1);
61022 atomic_set(&pd->reorder_objects, 0);
61023 atomic_set(&pd->refcnt, 0);
61024 pd->pinst = pinst;
61025diff -urNp linux-3.0.4/kernel/panic.c linux-3.0.4/kernel/panic.c
61026--- linux-3.0.4/kernel/panic.c 2011-07-21 22:17:23.000000000 -0400
61027+++ linux-3.0.4/kernel/panic.c 2011-08-23 21:48:14.000000000 -0400
61028@@ -369,7 +369,7 @@ static void warn_slowpath_common(const c
61029 const char *board;
61030
61031 printk(KERN_WARNING "------------[ cut here ]------------\n");
61032- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
61033+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
61034 board = dmi_get_system_info(DMI_PRODUCT_NAME);
61035 if (board)
61036 printk(KERN_WARNING "Hardware name: %s\n", board);
61037@@ -424,7 +424,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
61038 */
61039 void __stack_chk_fail(void)
61040 {
61041- panic("stack-protector: Kernel stack is corrupted in: %p\n",
61042+ dump_stack();
61043+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
61044 __builtin_return_address(0));
61045 }
61046 EXPORT_SYMBOL(__stack_chk_fail);
61047diff -urNp linux-3.0.4/kernel/pid.c linux-3.0.4/kernel/pid.c
61048--- linux-3.0.4/kernel/pid.c 2011-07-21 22:17:23.000000000 -0400
61049+++ linux-3.0.4/kernel/pid.c 2011-08-23 21:48:14.000000000 -0400
61050@@ -33,6 +33,7 @@
61051 #include <linux/rculist.h>
61052 #include <linux/bootmem.h>
61053 #include <linux/hash.h>
61054+#include <linux/security.h>
61055 #include <linux/pid_namespace.h>
61056 #include <linux/init_task.h>
61057 #include <linux/syscalls.h>
61058@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
61059
61060 int pid_max = PID_MAX_DEFAULT;
61061
61062-#define RESERVED_PIDS 300
61063+#define RESERVED_PIDS 500
61064
61065 int pid_max_min = RESERVED_PIDS + 1;
61066 int pid_max_max = PID_MAX_LIMIT;
61067@@ -419,8 +420,15 @@ EXPORT_SYMBOL(pid_task);
61068 */
61069 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
61070 {
61071+ struct task_struct *task;
61072+
61073 rcu_lockdep_assert(rcu_read_lock_held());
61074- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
61075+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
61076+
61077+ if (gr_pid_is_chrooted(task))
61078+ return NULL;
61079+
61080+ return task;
61081 }
61082
61083 struct task_struct *find_task_by_vpid(pid_t vnr)
61084@@ -428,6 +436,12 @@ struct task_struct *find_task_by_vpid(pi
61085 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
61086 }
61087
61088+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
61089+{
61090+ rcu_lockdep_assert(rcu_read_lock_held());
61091+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
61092+}
61093+
61094 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
61095 {
61096 struct pid *pid;
61097diff -urNp linux-3.0.4/kernel/posix-cpu-timers.c linux-3.0.4/kernel/posix-cpu-timers.c
61098--- linux-3.0.4/kernel/posix-cpu-timers.c 2011-07-21 22:17:23.000000000 -0400
61099+++ linux-3.0.4/kernel/posix-cpu-timers.c 2011-08-23 21:48:14.000000000 -0400
61100@@ -6,6 +6,7 @@
61101 #include <linux/posix-timers.h>
61102 #include <linux/errno.h>
61103 #include <linux/math64.h>
61104+#include <linux/security.h>
61105 #include <asm/uaccess.h>
61106 #include <linux/kernel_stat.h>
61107 #include <trace/events/timer.h>
61108@@ -1604,14 +1605,14 @@ struct k_clock clock_posix_cpu = {
61109
61110 static __init int init_posix_cpu_timers(void)
61111 {
61112- struct k_clock process = {
61113+ static struct k_clock process = {
61114 .clock_getres = process_cpu_clock_getres,
61115 .clock_get = process_cpu_clock_get,
61116 .timer_create = process_cpu_timer_create,
61117 .nsleep = process_cpu_nsleep,
61118 .nsleep_restart = process_cpu_nsleep_restart,
61119 };
61120- struct k_clock thread = {
61121+ static struct k_clock thread = {
61122 .clock_getres = thread_cpu_clock_getres,
61123 .clock_get = thread_cpu_clock_get,
61124 .timer_create = thread_cpu_timer_create,
61125diff -urNp linux-3.0.4/kernel/posix-timers.c linux-3.0.4/kernel/posix-timers.c
61126--- linux-3.0.4/kernel/posix-timers.c 2011-07-21 22:17:23.000000000 -0400
61127+++ linux-3.0.4/kernel/posix-timers.c 2011-08-23 21:48:14.000000000 -0400
61128@@ -43,6 +43,7 @@
61129 #include <linux/idr.h>
61130 #include <linux/posix-clock.h>
61131 #include <linux/posix-timers.h>
61132+#include <linux/grsecurity.h>
61133 #include <linux/syscalls.h>
61134 #include <linux/wait.h>
61135 #include <linux/workqueue.h>
61136@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
61137 * which we beg off on and pass to do_sys_settimeofday().
61138 */
61139
61140-static struct k_clock posix_clocks[MAX_CLOCKS];
61141+static struct k_clock *posix_clocks[MAX_CLOCKS];
61142
61143 /*
61144 * These ones are defined below.
61145@@ -227,7 +228,7 @@ static int posix_get_boottime(const cloc
61146 */
61147 static __init int init_posix_timers(void)
61148 {
61149- struct k_clock clock_realtime = {
61150+ static struct k_clock clock_realtime = {
61151 .clock_getres = hrtimer_get_res,
61152 .clock_get = posix_clock_realtime_get,
61153 .clock_set = posix_clock_realtime_set,
61154@@ -239,7 +240,7 @@ static __init int init_posix_timers(void
61155 .timer_get = common_timer_get,
61156 .timer_del = common_timer_del,
61157 };
61158- struct k_clock clock_monotonic = {
61159+ static struct k_clock clock_monotonic = {
61160 .clock_getres = hrtimer_get_res,
61161 .clock_get = posix_ktime_get_ts,
61162 .nsleep = common_nsleep,
61163@@ -249,19 +250,19 @@ static __init int init_posix_timers(void
61164 .timer_get = common_timer_get,
61165 .timer_del = common_timer_del,
61166 };
61167- struct k_clock clock_monotonic_raw = {
61168+ static struct k_clock clock_monotonic_raw = {
61169 .clock_getres = hrtimer_get_res,
61170 .clock_get = posix_get_monotonic_raw,
61171 };
61172- struct k_clock clock_realtime_coarse = {
61173+ static struct k_clock clock_realtime_coarse = {
61174 .clock_getres = posix_get_coarse_res,
61175 .clock_get = posix_get_realtime_coarse,
61176 };
61177- struct k_clock clock_monotonic_coarse = {
61178+ static struct k_clock clock_monotonic_coarse = {
61179 .clock_getres = posix_get_coarse_res,
61180 .clock_get = posix_get_monotonic_coarse,
61181 };
61182- struct k_clock clock_boottime = {
61183+ static struct k_clock clock_boottime = {
61184 .clock_getres = hrtimer_get_res,
61185 .clock_get = posix_get_boottime,
61186 .nsleep = common_nsleep,
61187@@ -272,6 +273,8 @@ static __init int init_posix_timers(void
61188 .timer_del = common_timer_del,
61189 };
61190
61191+ pax_track_stack();
61192+
61193 posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
61194 posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
61195 posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
61196@@ -473,7 +476,7 @@ void posix_timers_register_clock(const c
61197 return;
61198 }
61199
61200- posix_clocks[clock_id] = *new_clock;
61201+ posix_clocks[clock_id] = new_clock;
61202 }
61203 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
61204
61205@@ -519,9 +522,9 @@ static struct k_clock *clockid_to_kclock
61206 return (id & CLOCKFD_MASK) == CLOCKFD ?
61207 &clock_posix_dynamic : &clock_posix_cpu;
61208
61209- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
61210+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
61211 return NULL;
61212- return &posix_clocks[id];
61213+ return posix_clocks[id];
61214 }
61215
61216 static int common_timer_create(struct k_itimer *new_timer)
61217@@ -959,6 +962,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
61218 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
61219 return -EFAULT;
61220
61221+ /* only the CLOCK_REALTIME clock can be set, all other clocks
61222+ have their clock_set fptr set to a nosettime dummy function
61223+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
61224+ call common_clock_set, which calls do_sys_settimeofday, which
61225+ we hook
61226+ */
61227+
61228 return kc->clock_set(which_clock, &new_tp);
61229 }
61230
61231diff -urNp linux-3.0.4/kernel/power/poweroff.c linux-3.0.4/kernel/power/poweroff.c
61232--- linux-3.0.4/kernel/power/poweroff.c 2011-07-21 22:17:23.000000000 -0400
61233+++ linux-3.0.4/kernel/power/poweroff.c 2011-08-23 21:47:56.000000000 -0400
61234@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
61235 .enable_mask = SYSRQ_ENABLE_BOOT,
61236 };
61237
61238-static int pm_sysrq_init(void)
61239+static int __init pm_sysrq_init(void)
61240 {
61241 register_sysrq_key('o', &sysrq_poweroff_op);
61242 return 0;
61243diff -urNp linux-3.0.4/kernel/power/process.c linux-3.0.4/kernel/power/process.c
61244--- linux-3.0.4/kernel/power/process.c 2011-07-21 22:17:23.000000000 -0400
61245+++ linux-3.0.4/kernel/power/process.c 2011-08-23 21:47:56.000000000 -0400
61246@@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_
61247 u64 elapsed_csecs64;
61248 unsigned int elapsed_csecs;
61249 bool wakeup = false;
61250+ bool timedout = false;
61251
61252 do_gettimeofday(&start);
61253
61254@@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_
61255
61256 while (true) {
61257 todo = 0;
61258+ if (time_after(jiffies, end_time))
61259+ timedout = true;
61260 read_lock(&tasklist_lock);
61261 do_each_thread(g, p) {
61262 if (frozen(p) || !freezable(p))
61263@@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_
61264 * try_to_stop() after schedule() in ptrace/signal
61265 * stop sees TIF_FREEZE.
61266 */
61267- if (!task_is_stopped_or_traced(p) &&
61268- !freezer_should_skip(p))
61269+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
61270 todo++;
61271+ if (timedout) {
61272+ printk(KERN_ERR "Task refusing to freeze:\n");
61273+ sched_show_task(p);
61274+ }
61275+ }
61276 } while_each_thread(g, p);
61277 read_unlock(&tasklist_lock);
61278
61279@@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_
61280 todo += wq_busy;
61281 }
61282
61283- if (!todo || time_after(jiffies, end_time))
61284+ if (!todo || timedout)
61285 break;
61286
61287 if (pm_wakeup_pending()) {
61288diff -urNp linux-3.0.4/kernel/printk.c linux-3.0.4/kernel/printk.c
61289--- linux-3.0.4/kernel/printk.c 2011-07-21 22:17:23.000000000 -0400
61290+++ linux-3.0.4/kernel/printk.c 2011-08-23 21:48:14.000000000 -0400
61291@@ -313,12 +313,17 @@ static int check_syslog_permissions(int
61292 if (from_file && type != SYSLOG_ACTION_OPEN)
61293 return 0;
61294
61295+#ifdef CONFIG_GRKERNSEC_DMESG
61296+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
61297+ return -EPERM;
61298+#endif
61299+
61300 if (syslog_action_restricted(type)) {
61301 if (capable(CAP_SYSLOG))
61302 return 0;
61303 /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
61304 if (capable(CAP_SYS_ADMIN)) {
61305- WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
61306+ printk_once(KERN_WARNING "Attempt to access syslog with CAP_SYS_ADMIN "
61307 "but no CAP_SYSLOG (deprecated).\n");
61308 return 0;
61309 }
61310diff -urNp linux-3.0.4/kernel/profile.c linux-3.0.4/kernel/profile.c
61311--- linux-3.0.4/kernel/profile.c 2011-07-21 22:17:23.000000000 -0400
61312+++ linux-3.0.4/kernel/profile.c 2011-08-23 21:47:56.000000000 -0400
61313@@ -39,7 +39,7 @@ struct profile_hit {
61314 /* Oprofile timer tick hook */
61315 static int (*timer_hook)(struct pt_regs *) __read_mostly;
61316
61317-static atomic_t *prof_buffer;
61318+static atomic_unchecked_t *prof_buffer;
61319 static unsigned long prof_len, prof_shift;
61320
61321 int prof_on __read_mostly;
61322@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
61323 hits[i].pc = 0;
61324 continue;
61325 }
61326- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
61327+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
61328 hits[i].hits = hits[i].pc = 0;
61329 }
61330 }
61331@@ -342,9 +342,9 @@ static void do_profile_hits(int type, vo
61332 * Add the current hit(s) and flush the write-queue out
61333 * to the global buffer:
61334 */
61335- atomic_add(nr_hits, &prof_buffer[pc]);
61336+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
61337 for (i = 0; i < NR_PROFILE_HIT; ++i) {
61338- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
61339+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
61340 hits[i].pc = hits[i].hits = 0;
61341 }
61342 out:
61343@@ -419,7 +419,7 @@ static void do_profile_hits(int type, vo
61344 {
61345 unsigned long pc;
61346 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
61347- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
61348+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
61349 }
61350 #endif /* !CONFIG_SMP */
61351
61352@@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
61353 return -EFAULT;
61354 buf++; p++; count--; read++;
61355 }
61356- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
61357+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
61358 if (copy_to_user(buf, (void *)pnt, count))
61359 return -EFAULT;
61360 read += count;
61361@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
61362 }
61363 #endif
61364 profile_discard_flip_buffers();
61365- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
61366+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
61367 return count;
61368 }
61369
61370diff -urNp linux-3.0.4/kernel/ptrace.c linux-3.0.4/kernel/ptrace.c
61371--- linux-3.0.4/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
61372+++ linux-3.0.4/kernel/ptrace.c 2011-08-23 21:48:14.000000000 -0400
61373@@ -132,7 +132,8 @@ int ptrace_check_attach(struct task_stru
61374 return ret;
61375 }
61376
61377-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
61378+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
61379+ unsigned int log)
61380 {
61381 const struct cred *cred = current_cred(), *tcred;
61382
61383@@ -158,7 +159,8 @@ int __ptrace_may_access(struct task_stru
61384 cred->gid == tcred->sgid &&
61385 cred->gid == tcred->gid))
61386 goto ok;
61387- if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
61388+ if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
61389+ (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
61390 goto ok;
61391 rcu_read_unlock();
61392 return -EPERM;
61393@@ -167,7 +169,9 @@ ok:
61394 smp_rmb();
61395 if (task->mm)
61396 dumpable = get_dumpable(task->mm);
61397- if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
61398+ if (!dumpable &&
61399+ ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
61400+ (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
61401 return -EPERM;
61402
61403 return security_ptrace_access_check(task, mode);
61404@@ -177,7 +181,16 @@ bool ptrace_may_access(struct task_struc
61405 {
61406 int err;
61407 task_lock(task);
61408- err = __ptrace_may_access(task, mode);
61409+ err = __ptrace_may_access(task, mode, 0);
61410+ task_unlock(task);
61411+ return !err;
61412+}
61413+
61414+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
61415+{
61416+ int err;
61417+ task_lock(task);
61418+ err = __ptrace_may_access(task, mode, 1);
61419 task_unlock(task);
61420 return !err;
61421 }
61422@@ -205,7 +218,7 @@ static int ptrace_attach(struct task_str
61423 goto out;
61424
61425 task_lock(task);
61426- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
61427+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
61428 task_unlock(task);
61429 if (retval)
61430 goto unlock_creds;
61431@@ -218,7 +231,7 @@ static int ptrace_attach(struct task_str
61432 goto unlock_tasklist;
61433
61434 task->ptrace = PT_PTRACED;
61435- if (task_ns_capable(task, CAP_SYS_PTRACE))
61436+ if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
61437 task->ptrace |= PT_PTRACE_CAP;
61438
61439 __ptrace_link(task, current);
61440@@ -406,6 +419,8 @@ int ptrace_readdata(struct task_struct *
61441 {
61442 int copied = 0;
61443
61444+ pax_track_stack();
61445+
61446 while (len > 0) {
61447 char buf[128];
61448 int this_len, retval;
61449@@ -417,7 +432,7 @@ int ptrace_readdata(struct task_struct *
61450 break;
61451 return -EIO;
61452 }
61453- if (copy_to_user(dst, buf, retval))
61454+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
61455 return -EFAULT;
61456 copied += retval;
61457 src += retval;
61458@@ -431,6 +446,8 @@ int ptrace_writedata(struct task_struct
61459 {
61460 int copied = 0;
61461
61462+ pax_track_stack();
61463+
61464 while (len > 0) {
61465 char buf[128];
61466 int this_len, retval;
61467@@ -613,9 +630,11 @@ int ptrace_request(struct task_struct *c
61468 {
61469 int ret = -EIO;
61470 siginfo_t siginfo;
61471- void __user *datavp = (void __user *) data;
61472+ void __user *datavp = (__force void __user *) data;
61473 unsigned long __user *datalp = datavp;
61474
61475+ pax_track_stack();
61476+
61477 switch (request) {
61478 case PTRACE_PEEKTEXT:
61479 case PTRACE_PEEKDATA:
61480@@ -761,14 +780,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
61481 goto out;
61482 }
61483
61484+ if (gr_handle_ptrace(child, request)) {
61485+ ret = -EPERM;
61486+ goto out_put_task_struct;
61487+ }
61488+
61489 if (request == PTRACE_ATTACH) {
61490 ret = ptrace_attach(child);
61491 /*
61492 * Some architectures need to do book-keeping after
61493 * a ptrace attach.
61494 */
61495- if (!ret)
61496+ if (!ret) {
61497 arch_ptrace_attach(child);
61498+ gr_audit_ptrace(child);
61499+ }
61500 goto out_put_task_struct;
61501 }
61502
61503@@ -793,7 +819,7 @@ int generic_ptrace_peekdata(struct task_
61504 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
61505 if (copied != sizeof(tmp))
61506 return -EIO;
61507- return put_user(tmp, (unsigned long __user *)data);
61508+ return put_user(tmp, (__force unsigned long __user *)data);
61509 }
61510
61511 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
61512@@ -816,6 +842,8 @@ int compat_ptrace_request(struct task_st
61513 siginfo_t siginfo;
61514 int ret;
61515
61516+ pax_track_stack();
61517+
61518 switch (request) {
61519 case PTRACE_PEEKTEXT:
61520 case PTRACE_PEEKDATA:
61521@@ -903,14 +931,21 @@ asmlinkage long compat_sys_ptrace(compat
61522 goto out;
61523 }
61524
61525+ if (gr_handle_ptrace(child, request)) {
61526+ ret = -EPERM;
61527+ goto out_put_task_struct;
61528+ }
61529+
61530 if (request == PTRACE_ATTACH) {
61531 ret = ptrace_attach(child);
61532 /*
61533 * Some architectures need to do book-keeping after
61534 * a ptrace attach.
61535 */
61536- if (!ret)
61537+ if (!ret) {
61538 arch_ptrace_attach(child);
61539+ gr_audit_ptrace(child);
61540+ }
61541 goto out_put_task_struct;
61542 }
61543
61544diff -urNp linux-3.0.4/kernel/rcutorture.c linux-3.0.4/kernel/rcutorture.c
61545--- linux-3.0.4/kernel/rcutorture.c 2011-07-21 22:17:23.000000000 -0400
61546+++ linux-3.0.4/kernel/rcutorture.c 2011-08-23 21:47:56.000000000 -0400
61547@@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
61548 { 0 };
61549 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
61550 { 0 };
61551-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
61552-static atomic_t n_rcu_torture_alloc;
61553-static atomic_t n_rcu_torture_alloc_fail;
61554-static atomic_t n_rcu_torture_free;
61555-static atomic_t n_rcu_torture_mberror;
61556-static atomic_t n_rcu_torture_error;
61557+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
61558+static atomic_unchecked_t n_rcu_torture_alloc;
61559+static atomic_unchecked_t n_rcu_torture_alloc_fail;
61560+static atomic_unchecked_t n_rcu_torture_free;
61561+static atomic_unchecked_t n_rcu_torture_mberror;
61562+static atomic_unchecked_t n_rcu_torture_error;
61563 static long n_rcu_torture_boost_ktrerror;
61564 static long n_rcu_torture_boost_rterror;
61565 static long n_rcu_torture_boost_failure;
61566@@ -223,11 +223,11 @@ rcu_torture_alloc(void)
61567
61568 spin_lock_bh(&rcu_torture_lock);
61569 if (list_empty(&rcu_torture_freelist)) {
61570- atomic_inc(&n_rcu_torture_alloc_fail);
61571+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
61572 spin_unlock_bh(&rcu_torture_lock);
61573 return NULL;
61574 }
61575- atomic_inc(&n_rcu_torture_alloc);
61576+ atomic_inc_unchecked(&n_rcu_torture_alloc);
61577 p = rcu_torture_freelist.next;
61578 list_del_init(p);
61579 spin_unlock_bh(&rcu_torture_lock);
61580@@ -240,7 +240,7 @@ rcu_torture_alloc(void)
61581 static void
61582 rcu_torture_free(struct rcu_torture *p)
61583 {
61584- atomic_inc(&n_rcu_torture_free);
61585+ atomic_inc_unchecked(&n_rcu_torture_free);
61586 spin_lock_bh(&rcu_torture_lock);
61587 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
61588 spin_unlock_bh(&rcu_torture_lock);
61589@@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
61590 i = rp->rtort_pipe_count;
61591 if (i > RCU_TORTURE_PIPE_LEN)
61592 i = RCU_TORTURE_PIPE_LEN;
61593- atomic_inc(&rcu_torture_wcount[i]);
61594+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
61595 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
61596 rp->rtort_mbtest = 0;
61597 rcu_torture_free(rp);
61598@@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_fr
61599 i = rp->rtort_pipe_count;
61600 if (i > RCU_TORTURE_PIPE_LEN)
61601 i = RCU_TORTURE_PIPE_LEN;
61602- atomic_inc(&rcu_torture_wcount[i]);
61603+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
61604 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
61605 rp->rtort_mbtest = 0;
61606 list_del(&rp->rtort_free);
61607@@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
61608 i = old_rp->rtort_pipe_count;
61609 if (i > RCU_TORTURE_PIPE_LEN)
61610 i = RCU_TORTURE_PIPE_LEN;
61611- atomic_inc(&rcu_torture_wcount[i]);
61612+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
61613 old_rp->rtort_pipe_count++;
61614 cur_ops->deferred_free(old_rp);
61615 }
61616@@ -951,7 +951,7 @@ static void rcu_torture_timer(unsigned l
61617 return;
61618 }
61619 if (p->rtort_mbtest == 0)
61620- atomic_inc(&n_rcu_torture_mberror);
61621+ atomic_inc_unchecked(&n_rcu_torture_mberror);
61622 spin_lock(&rand_lock);
61623 cur_ops->read_delay(&rand);
61624 n_rcu_torture_timers++;
61625@@ -1013,7 +1013,7 @@ rcu_torture_reader(void *arg)
61626 continue;
61627 }
61628 if (p->rtort_mbtest == 0)
61629- atomic_inc(&n_rcu_torture_mberror);
61630+ atomic_inc_unchecked(&n_rcu_torture_mberror);
61631 cur_ops->read_delay(&rand);
61632 preempt_disable();
61633 pipe_count = p->rtort_pipe_count;
61634@@ -1072,16 +1072,16 @@ rcu_torture_printk(char *page)
61635 rcu_torture_current,
61636 rcu_torture_current_version,
61637 list_empty(&rcu_torture_freelist),
61638- atomic_read(&n_rcu_torture_alloc),
61639- atomic_read(&n_rcu_torture_alloc_fail),
61640- atomic_read(&n_rcu_torture_free),
61641- atomic_read(&n_rcu_torture_mberror),
61642+ atomic_read_unchecked(&n_rcu_torture_alloc),
61643+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
61644+ atomic_read_unchecked(&n_rcu_torture_free),
61645+ atomic_read_unchecked(&n_rcu_torture_mberror),
61646 n_rcu_torture_boost_ktrerror,
61647 n_rcu_torture_boost_rterror,
61648 n_rcu_torture_boost_failure,
61649 n_rcu_torture_boosts,
61650 n_rcu_torture_timers);
61651- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
61652+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
61653 n_rcu_torture_boost_ktrerror != 0 ||
61654 n_rcu_torture_boost_rterror != 0 ||
61655 n_rcu_torture_boost_failure != 0)
61656@@ -1089,7 +1089,7 @@ rcu_torture_printk(char *page)
61657 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
61658 if (i > 1) {
61659 cnt += sprintf(&page[cnt], "!!! ");
61660- atomic_inc(&n_rcu_torture_error);
61661+ atomic_inc_unchecked(&n_rcu_torture_error);
61662 WARN_ON_ONCE(1);
61663 }
61664 cnt += sprintf(&page[cnt], "Reader Pipe: ");
61665@@ -1103,7 +1103,7 @@ rcu_torture_printk(char *page)
61666 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
61667 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
61668 cnt += sprintf(&page[cnt], " %d",
61669- atomic_read(&rcu_torture_wcount[i]));
61670+ atomic_read_unchecked(&rcu_torture_wcount[i]));
61671 }
61672 cnt += sprintf(&page[cnt], "\n");
61673 if (cur_ops->stats)
61674@@ -1412,7 +1412,7 @@ rcu_torture_cleanup(void)
61675
61676 if (cur_ops->cleanup)
61677 cur_ops->cleanup();
61678- if (atomic_read(&n_rcu_torture_error))
61679+ if (atomic_read_unchecked(&n_rcu_torture_error))
61680 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
61681 else
61682 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
61683@@ -1476,17 +1476,17 @@ rcu_torture_init(void)
61684
61685 rcu_torture_current = NULL;
61686 rcu_torture_current_version = 0;
61687- atomic_set(&n_rcu_torture_alloc, 0);
61688- atomic_set(&n_rcu_torture_alloc_fail, 0);
61689- atomic_set(&n_rcu_torture_free, 0);
61690- atomic_set(&n_rcu_torture_mberror, 0);
61691- atomic_set(&n_rcu_torture_error, 0);
61692+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
61693+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
61694+ atomic_set_unchecked(&n_rcu_torture_free, 0);
61695+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
61696+ atomic_set_unchecked(&n_rcu_torture_error, 0);
61697 n_rcu_torture_boost_ktrerror = 0;
61698 n_rcu_torture_boost_rterror = 0;
61699 n_rcu_torture_boost_failure = 0;
61700 n_rcu_torture_boosts = 0;
61701 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
61702- atomic_set(&rcu_torture_wcount[i], 0);
61703+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
61704 for_each_possible_cpu(cpu) {
61705 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
61706 per_cpu(rcu_torture_count, cpu)[i] = 0;
61707diff -urNp linux-3.0.4/kernel/rcutree.c linux-3.0.4/kernel/rcutree.c
61708--- linux-3.0.4/kernel/rcutree.c 2011-07-21 22:17:23.000000000 -0400
61709+++ linux-3.0.4/kernel/rcutree.c 2011-09-14 09:08:05.000000000 -0400
61710@@ -356,9 +356,9 @@ void rcu_enter_nohz(void)
61711 }
61712 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
61713 smp_mb__before_atomic_inc(); /* See above. */
61714- atomic_inc(&rdtp->dynticks);
61715+ atomic_inc_unchecked(&rdtp->dynticks);
61716 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
61717- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
61718+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
61719 local_irq_restore(flags);
61720
61721 /* If the interrupt queued a callback, get out of dyntick mode. */
61722@@ -387,10 +387,10 @@ void rcu_exit_nohz(void)
61723 return;
61724 }
61725 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
61726- atomic_inc(&rdtp->dynticks);
61727+ atomic_inc_unchecked(&rdtp->dynticks);
61728 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
61729 smp_mb__after_atomic_inc(); /* See above. */
61730- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
61731+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
61732 local_irq_restore(flags);
61733 }
61734
61735@@ -406,14 +406,14 @@ void rcu_nmi_enter(void)
61736 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
61737
61738 if (rdtp->dynticks_nmi_nesting == 0 &&
61739- (atomic_read(&rdtp->dynticks) & 0x1))
61740+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
61741 return;
61742 rdtp->dynticks_nmi_nesting++;
61743 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
61744- atomic_inc(&rdtp->dynticks);
61745+ atomic_inc_unchecked(&rdtp->dynticks);
61746 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
61747 smp_mb__after_atomic_inc(); /* See above. */
61748- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
61749+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
61750 }
61751
61752 /**
61753@@ -432,9 +432,9 @@ void rcu_nmi_exit(void)
61754 return;
61755 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
61756 smp_mb__before_atomic_inc(); /* See above. */
61757- atomic_inc(&rdtp->dynticks);
61758+ atomic_inc_unchecked(&rdtp->dynticks);
61759 smp_mb__after_atomic_inc(); /* Force delay to next write. */
61760- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
61761+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
61762 }
61763
61764 /**
61765@@ -469,7 +469,7 @@ void rcu_irq_exit(void)
61766 */
61767 static int dyntick_save_progress_counter(struct rcu_data *rdp)
61768 {
61769- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
61770+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
61771 return 0;
61772 }
61773
61774@@ -484,7 +484,7 @@ static int rcu_implicit_dynticks_qs(stru
61775 unsigned long curr;
61776 unsigned long snap;
61777
61778- curr = (unsigned long)atomic_add_return(0, &rdp->dynticks->dynticks);
61779+ curr = (unsigned long)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
61780 snap = (unsigned long)rdp->dynticks_snap;
61781
61782 /*
61783@@ -1470,7 +1470,7 @@ __rcu_process_callbacks(struct rcu_state
61784 /*
61785 * Do softirq processing for the current CPU.
61786 */
61787-static void rcu_process_callbacks(struct softirq_action *unused)
61788+static void rcu_process_callbacks(void)
61789 {
61790 __rcu_process_callbacks(&rcu_sched_state,
61791 &__get_cpu_var(rcu_sched_data));
61792diff -urNp linux-3.0.4/kernel/rcutree.h linux-3.0.4/kernel/rcutree.h
61793--- linux-3.0.4/kernel/rcutree.h 2011-07-21 22:17:23.000000000 -0400
61794+++ linux-3.0.4/kernel/rcutree.h 2011-09-14 09:08:05.000000000 -0400
61795@@ -86,7 +86,7 @@
61796 struct rcu_dynticks {
61797 int dynticks_nesting; /* Track irq/process nesting level. */
61798 int dynticks_nmi_nesting; /* Track NMI nesting level. */
61799- atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
61800+ atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
61801 };
61802
61803 /* RCU's kthread states for tracing. */
61804diff -urNp linux-3.0.4/kernel/rcutree_plugin.h linux-3.0.4/kernel/rcutree_plugin.h
61805--- linux-3.0.4/kernel/rcutree_plugin.h 2011-07-21 22:17:23.000000000 -0400
61806+++ linux-3.0.4/kernel/rcutree_plugin.h 2011-08-23 21:47:56.000000000 -0400
61807@@ -822,7 +822,7 @@ void synchronize_rcu_expedited(void)
61808
61809 /* Clean up and exit. */
61810 smp_mb(); /* ensure expedited GP seen before counter increment. */
61811- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
61812+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
61813 unlock_mb_ret:
61814 mutex_unlock(&sync_rcu_preempt_exp_mutex);
61815 mb_ret:
61816@@ -1774,8 +1774,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe
61817
61818 #else /* #ifndef CONFIG_SMP */
61819
61820-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
61821-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
61822+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
61823+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
61824
61825 static int synchronize_sched_expedited_cpu_stop(void *data)
61826 {
61827@@ -1830,7 +1830,7 @@ void synchronize_sched_expedited(void)
61828 int firstsnap, s, snap, trycount = 0;
61829
61830 /* Note that atomic_inc_return() implies full memory barrier. */
61831- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
61832+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
61833 get_online_cpus();
61834
61835 /*
61836@@ -1851,7 +1851,7 @@ void synchronize_sched_expedited(void)
61837 }
61838
61839 /* Check to see if someone else did our work for us. */
61840- s = atomic_read(&sync_sched_expedited_done);
61841+ s = atomic_read_unchecked(&sync_sched_expedited_done);
61842 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
61843 smp_mb(); /* ensure test happens before caller kfree */
61844 return;
61845@@ -1866,7 +1866,7 @@ void synchronize_sched_expedited(void)
61846 * grace period works for us.
61847 */
61848 get_online_cpus();
61849- snap = atomic_read(&sync_sched_expedited_started) - 1;
61850+ snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
61851 smp_mb(); /* ensure read is before try_stop_cpus(). */
61852 }
61853
61854@@ -1877,12 +1877,12 @@ void synchronize_sched_expedited(void)
61855 * than we did beat us to the punch.
61856 */
61857 do {
61858- s = atomic_read(&sync_sched_expedited_done);
61859+ s = atomic_read_unchecked(&sync_sched_expedited_done);
61860 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
61861 smp_mb(); /* ensure test happens before caller kfree */
61862 break;
61863 }
61864- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
61865+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
61866
61867 put_online_cpus();
61868 }
61869diff -urNp linux-3.0.4/kernel/relay.c linux-3.0.4/kernel/relay.c
61870--- linux-3.0.4/kernel/relay.c 2011-07-21 22:17:23.000000000 -0400
61871+++ linux-3.0.4/kernel/relay.c 2011-08-23 21:48:14.000000000 -0400
61872@@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struc
61873 };
61874 ssize_t ret;
61875
61876+ pax_track_stack();
61877+
61878 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
61879 return 0;
61880 if (splice_grow_spd(pipe, &spd))
61881diff -urNp linux-3.0.4/kernel/resource.c linux-3.0.4/kernel/resource.c
61882--- linux-3.0.4/kernel/resource.c 2011-07-21 22:17:23.000000000 -0400
61883+++ linux-3.0.4/kernel/resource.c 2011-08-23 21:48:14.000000000 -0400
61884@@ -141,8 +141,18 @@ static const struct file_operations proc
61885
61886 static int __init ioresources_init(void)
61887 {
61888+#ifdef CONFIG_GRKERNSEC_PROC_ADD
61889+#ifdef CONFIG_GRKERNSEC_PROC_USER
61890+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
61891+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
61892+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61893+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
61894+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
61895+#endif
61896+#else
61897 proc_create("ioports", 0, NULL, &proc_ioports_operations);
61898 proc_create("iomem", 0, NULL, &proc_iomem_operations);
61899+#endif
61900 return 0;
61901 }
61902 __initcall(ioresources_init);
61903diff -urNp linux-3.0.4/kernel/rtmutex-tester.c linux-3.0.4/kernel/rtmutex-tester.c
61904--- linux-3.0.4/kernel/rtmutex-tester.c 2011-07-21 22:17:23.000000000 -0400
61905+++ linux-3.0.4/kernel/rtmutex-tester.c 2011-08-23 21:47:56.000000000 -0400
61906@@ -20,7 +20,7 @@
61907 #define MAX_RT_TEST_MUTEXES 8
61908
61909 static spinlock_t rttest_lock;
61910-static atomic_t rttest_event;
61911+static atomic_unchecked_t rttest_event;
61912
61913 struct test_thread_data {
61914 int opcode;
61915@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_
61916
61917 case RTTEST_LOCKCONT:
61918 td->mutexes[td->opdata] = 1;
61919- td->event = atomic_add_return(1, &rttest_event);
61920+ td->event = atomic_add_return_unchecked(1, &rttest_event);
61921 return 0;
61922
61923 case RTTEST_RESET:
61924@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_
61925 return 0;
61926
61927 case RTTEST_RESETEVENT:
61928- atomic_set(&rttest_event, 0);
61929+ atomic_set_unchecked(&rttest_event, 0);
61930 return 0;
61931
61932 default:
61933@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_
61934 return ret;
61935
61936 td->mutexes[id] = 1;
61937- td->event = atomic_add_return(1, &rttest_event);
61938+ td->event = atomic_add_return_unchecked(1, &rttest_event);
61939 rt_mutex_lock(&mutexes[id]);
61940- td->event = atomic_add_return(1, &rttest_event);
61941+ td->event = atomic_add_return_unchecked(1, &rttest_event);
61942 td->mutexes[id] = 4;
61943 return 0;
61944
61945@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_
61946 return ret;
61947
61948 td->mutexes[id] = 1;
61949- td->event = atomic_add_return(1, &rttest_event);
61950+ td->event = atomic_add_return_unchecked(1, &rttest_event);
61951 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
61952- td->event = atomic_add_return(1, &rttest_event);
61953+ td->event = atomic_add_return_unchecked(1, &rttest_event);
61954 td->mutexes[id] = ret ? 0 : 4;
61955 return ret ? -EINTR : 0;
61956
61957@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_
61958 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
61959 return ret;
61960
61961- td->event = atomic_add_return(1, &rttest_event);
61962+ td->event = atomic_add_return_unchecked(1, &rttest_event);
61963 rt_mutex_unlock(&mutexes[id]);
61964- td->event = atomic_add_return(1, &rttest_event);
61965+ td->event = atomic_add_return_unchecked(1, &rttest_event);
61966 td->mutexes[id] = 0;
61967 return 0;
61968
61969@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mu
61970 break;
61971
61972 td->mutexes[dat] = 2;
61973- td->event = atomic_add_return(1, &rttest_event);
61974+ td->event = atomic_add_return_unchecked(1, &rttest_event);
61975 break;
61976
61977 default:
61978@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mu
61979 return;
61980
61981 td->mutexes[dat] = 3;
61982- td->event = atomic_add_return(1, &rttest_event);
61983+ td->event = atomic_add_return_unchecked(1, &rttest_event);
61984 break;
61985
61986 case RTTEST_LOCKNOWAIT:
61987@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mu
61988 return;
61989
61990 td->mutexes[dat] = 1;
61991- td->event = atomic_add_return(1, &rttest_event);
61992+ td->event = atomic_add_return_unchecked(1, &rttest_event);
61993 return;
61994
61995 default:
61996diff -urNp linux-3.0.4/kernel/sched_autogroup.c linux-3.0.4/kernel/sched_autogroup.c
61997--- linux-3.0.4/kernel/sched_autogroup.c 2011-07-21 22:17:23.000000000 -0400
61998+++ linux-3.0.4/kernel/sched_autogroup.c 2011-08-23 21:47:56.000000000 -0400
61999@@ -7,7 +7,7 @@
62000
62001 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
62002 static struct autogroup autogroup_default;
62003-static atomic_t autogroup_seq_nr;
62004+static atomic_unchecked_t autogroup_seq_nr;
62005
62006 static void __init autogroup_init(struct task_struct *init_task)
62007 {
62008@@ -78,7 +78,7 @@ static inline struct autogroup *autogrou
62009
62010 kref_init(&ag->kref);
62011 init_rwsem(&ag->lock);
62012- ag->id = atomic_inc_return(&autogroup_seq_nr);
62013+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
62014 ag->tg = tg;
62015 #ifdef CONFIG_RT_GROUP_SCHED
62016 /*
62017diff -urNp linux-3.0.4/kernel/sched.c linux-3.0.4/kernel/sched.c
62018--- linux-3.0.4/kernel/sched.c 2011-07-21 22:17:23.000000000 -0400
62019+++ linux-3.0.4/kernel/sched.c 2011-08-23 21:48:14.000000000 -0400
62020@@ -4251,6 +4251,8 @@ asmlinkage void __sched schedule(void)
62021 struct rq *rq;
62022 int cpu;
62023
62024+ pax_track_stack();
62025+
62026 need_resched:
62027 preempt_disable();
62028 cpu = smp_processor_id();
62029@@ -4934,6 +4936,8 @@ int can_nice(const struct task_struct *p
62030 /* convert nice value [19,-20] to rlimit style value [1,40] */
62031 int nice_rlim = 20 - nice;
62032
62033+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
62034+
62035 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
62036 capable(CAP_SYS_NICE));
62037 }
62038@@ -4967,7 +4971,8 @@ SYSCALL_DEFINE1(nice, int, increment)
62039 if (nice > 19)
62040 nice = 19;
62041
62042- if (increment < 0 && !can_nice(current, nice))
62043+ if (increment < 0 && (!can_nice(current, nice) ||
62044+ gr_handle_chroot_nice()))
62045 return -EPERM;
62046
62047 retval = security_task_setnice(current, nice);
62048@@ -5111,6 +5116,7 @@ recheck:
62049 unsigned long rlim_rtprio =
62050 task_rlimit(p, RLIMIT_RTPRIO);
62051
62052+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
62053 /* can't set/change the rt policy */
62054 if (policy != p->policy && !rlim_rtprio)
62055 return -EPERM;
62056diff -urNp linux-3.0.4/kernel/sched_fair.c linux-3.0.4/kernel/sched_fair.c
62057--- linux-3.0.4/kernel/sched_fair.c 2011-07-21 22:17:23.000000000 -0400
62058+++ linux-3.0.4/kernel/sched_fair.c 2011-08-23 21:47:56.000000000 -0400
62059@@ -4050,7 +4050,7 @@ static void nohz_idle_balance(int this_c
62060 * run_rebalance_domains is triggered when needed from the scheduler tick.
62061 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
62062 */
62063-static void run_rebalance_domains(struct softirq_action *h)
62064+static void run_rebalance_domains(void)
62065 {
62066 int this_cpu = smp_processor_id();
62067 struct rq *this_rq = cpu_rq(this_cpu);
62068diff -urNp linux-3.0.4/kernel/signal.c linux-3.0.4/kernel/signal.c
62069--- linux-3.0.4/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
62070+++ linux-3.0.4/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
62071@@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache
62072
62073 int print_fatal_signals __read_mostly;
62074
62075-static void __user *sig_handler(struct task_struct *t, int sig)
62076+static __sighandler_t sig_handler(struct task_struct *t, int sig)
62077 {
62078 return t->sighand->action[sig - 1].sa.sa_handler;
62079 }
62080
62081-static int sig_handler_ignored(void __user *handler, int sig)
62082+static int sig_handler_ignored(__sighandler_t handler, int sig)
62083 {
62084 /* Is it explicitly or implicitly ignored? */
62085 return handler == SIG_IGN ||
62086@@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us
62087 static int sig_task_ignored(struct task_struct *t, int sig,
62088 int from_ancestor_ns)
62089 {
62090- void __user *handler;
62091+ __sighandler_t handler;
62092
62093 handler = sig_handler(t, sig);
62094
62095@@ -320,6 +320,9 @@ __sigqueue_alloc(int sig, struct task_st
62096 atomic_inc(&user->sigpending);
62097 rcu_read_unlock();
62098
62099+ if (!override_rlimit)
62100+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
62101+
62102 if (override_rlimit ||
62103 atomic_read(&user->sigpending) <=
62104 task_rlimit(t, RLIMIT_SIGPENDING)) {
62105@@ -444,7 +447,7 @@ flush_signal_handlers(struct task_struct
62106
62107 int unhandled_signal(struct task_struct *tsk, int sig)
62108 {
62109- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
62110+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
62111 if (is_global_init(tsk))
62112 return 1;
62113 if (handler != SIG_IGN && handler != SIG_DFL)
62114@@ -770,6 +773,13 @@ static int check_kill_permission(int sig
62115 }
62116 }
62117
62118+ /* allow glibc communication via tgkill to other threads in our
62119+ thread group */
62120+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
62121+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
62122+ && gr_handle_signal(t, sig))
62123+ return -EPERM;
62124+
62125 return security_task_kill(t, info, sig, 0);
62126 }
62127
62128@@ -1092,7 +1102,7 @@ __group_send_sig_info(int sig, struct si
62129 return send_signal(sig, info, p, 1);
62130 }
62131
62132-static int
62133+int
62134 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
62135 {
62136 return send_signal(sig, info, t, 0);
62137@@ -1129,6 +1139,7 @@ force_sig_info(int sig, struct siginfo *
62138 unsigned long int flags;
62139 int ret, blocked, ignored;
62140 struct k_sigaction *action;
62141+ int is_unhandled = 0;
62142
62143 spin_lock_irqsave(&t->sighand->siglock, flags);
62144 action = &t->sighand->action[sig-1];
62145@@ -1143,9 +1154,18 @@ force_sig_info(int sig, struct siginfo *
62146 }
62147 if (action->sa.sa_handler == SIG_DFL)
62148 t->signal->flags &= ~SIGNAL_UNKILLABLE;
62149+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
62150+ is_unhandled = 1;
62151 ret = specific_send_sig_info(sig, info, t);
62152 spin_unlock_irqrestore(&t->sighand->siglock, flags);
62153
62154+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
62155+ normal operation */
62156+ if (is_unhandled) {
62157+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
62158+ gr_handle_crash(t, sig);
62159+ }
62160+
62161 return ret;
62162 }
62163
62164@@ -1212,8 +1232,11 @@ int group_send_sig_info(int sig, struct
62165 ret = check_kill_permission(sig, info, p);
62166 rcu_read_unlock();
62167
62168- if (!ret && sig)
62169+ if (!ret && sig) {
62170 ret = do_send_sig_info(sig, info, p, true);
62171+ if (!ret)
62172+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
62173+ }
62174
62175 return ret;
62176 }
62177@@ -1839,6 +1862,8 @@ void ptrace_notify(int exit_code)
62178 {
62179 siginfo_t info;
62180
62181+ pax_track_stack();
62182+
62183 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
62184
62185 memset(&info, 0, sizeof info);
62186@@ -2639,7 +2664,15 @@ do_send_specific(pid_t tgid, pid_t pid,
62187 int error = -ESRCH;
62188
62189 rcu_read_lock();
62190- p = find_task_by_vpid(pid);
62191+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62192+ /* allow glibc communication via tgkill to other threads in our
62193+ thread group */
62194+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
62195+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
62196+ p = find_task_by_vpid_unrestricted(pid);
62197+ else
62198+#endif
62199+ p = find_task_by_vpid(pid);
62200 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
62201 error = check_kill_permission(sig, info, p);
62202 /*
62203diff -urNp linux-3.0.4/kernel/smp.c linux-3.0.4/kernel/smp.c
62204--- linux-3.0.4/kernel/smp.c 2011-07-21 22:17:23.000000000 -0400
62205+++ linux-3.0.4/kernel/smp.c 2011-08-23 21:47:56.000000000 -0400
62206@@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t fu
62207 }
62208 EXPORT_SYMBOL(smp_call_function);
62209
62210-void ipi_call_lock(void)
62211+void ipi_call_lock(void) __acquires(call_function.lock)
62212 {
62213 raw_spin_lock(&call_function.lock);
62214 }
62215
62216-void ipi_call_unlock(void)
62217+void ipi_call_unlock(void) __releases(call_function.lock)
62218 {
62219 raw_spin_unlock(&call_function.lock);
62220 }
62221
62222-void ipi_call_lock_irq(void)
62223+void ipi_call_lock_irq(void) __acquires(call_function.lock)
62224 {
62225 raw_spin_lock_irq(&call_function.lock);
62226 }
62227
62228-void ipi_call_unlock_irq(void)
62229+void ipi_call_unlock_irq(void) __releases(call_function.lock)
62230 {
62231 raw_spin_unlock_irq(&call_function.lock);
62232 }
62233diff -urNp linux-3.0.4/kernel/softirq.c linux-3.0.4/kernel/softirq.c
62234--- linux-3.0.4/kernel/softirq.c 2011-07-21 22:17:23.000000000 -0400
62235+++ linux-3.0.4/kernel/softirq.c 2011-08-23 21:47:56.000000000 -0400
62236@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
62237
62238 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
62239
62240-char *softirq_to_name[NR_SOFTIRQS] = {
62241+const char * const softirq_to_name[NR_SOFTIRQS] = {
62242 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
62243 "TASKLET", "SCHED", "HRTIMER", "RCU"
62244 };
62245@@ -235,7 +235,7 @@ restart:
62246 kstat_incr_softirqs_this_cpu(vec_nr);
62247
62248 trace_softirq_entry(vec_nr);
62249- h->action(h);
62250+ h->action();
62251 trace_softirq_exit(vec_nr);
62252 if (unlikely(prev_count != preempt_count())) {
62253 printk(KERN_ERR "huh, entered softirq %u %s %p"
62254@@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
62255 local_irq_restore(flags);
62256 }
62257
62258-void open_softirq(int nr, void (*action)(struct softirq_action *))
62259+void open_softirq(int nr, void (*action)(void))
62260 {
62261- softirq_vec[nr].action = action;
62262+ pax_open_kernel();
62263+ *(void **)&softirq_vec[nr].action = action;
62264+ pax_close_kernel();
62265 }
62266
62267 /*
62268@@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct
62269
62270 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
62271
62272-static void tasklet_action(struct softirq_action *a)
62273+static void tasklet_action(void)
62274 {
62275 struct tasklet_struct *list;
62276
62277@@ -476,7 +478,7 @@ static void tasklet_action(struct softir
62278 }
62279 }
62280
62281-static void tasklet_hi_action(struct softirq_action *a)
62282+static void tasklet_hi_action(void)
62283 {
62284 struct tasklet_struct *list;
62285
62286diff -urNp linux-3.0.4/kernel/sys.c linux-3.0.4/kernel/sys.c
62287--- linux-3.0.4/kernel/sys.c 2011-09-02 18:11:26.000000000 -0400
62288+++ linux-3.0.4/kernel/sys.c 2011-08-29 23:26:27.000000000 -0400
62289@@ -158,6 +158,12 @@ static int set_one_prio(struct task_stru
62290 error = -EACCES;
62291 goto out;
62292 }
62293+
62294+ if (gr_handle_chroot_setpriority(p, niceval)) {
62295+ error = -EACCES;
62296+ goto out;
62297+ }
62298+
62299 no_nice = security_task_setnice(p, niceval);
62300 if (no_nice) {
62301 error = no_nice;
62302@@ -541,6 +547,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
62303 goto error;
62304 }
62305
62306+ if (gr_check_group_change(new->gid, new->egid, -1))
62307+ goto error;
62308+
62309 if (rgid != (gid_t) -1 ||
62310 (egid != (gid_t) -1 && egid != old->gid))
62311 new->sgid = new->egid;
62312@@ -570,6 +579,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
62313 old = current_cred();
62314
62315 retval = -EPERM;
62316+
62317+ if (gr_check_group_change(gid, gid, gid))
62318+ goto error;
62319+
62320 if (nsown_capable(CAP_SETGID))
62321 new->gid = new->egid = new->sgid = new->fsgid = gid;
62322 else if (gid == old->gid || gid == old->sgid)
62323@@ -595,11 +608,18 @@ static int set_user(struct cred *new)
62324 if (!new_user)
62325 return -EAGAIN;
62326
62327+ /*
62328+ * We don't fail in case of NPROC limit excess here because too many
62329+ * poorly written programs don't check set*uid() return code, assuming
62330+ * it never fails if called by root. We may still enforce NPROC limit
62331+ * for programs doing set*uid()+execve() by harmlessly deferring the
62332+ * failure to the execve() stage.
62333+ */
62334 if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
62335- new_user != INIT_USER) {
62336- free_uid(new_user);
62337- return -EAGAIN;
62338- }
62339+ new_user != INIT_USER)
62340+ current->flags |= PF_NPROC_EXCEEDED;
62341+ else
62342+ current->flags &= ~PF_NPROC_EXCEEDED;
62343
62344 free_uid(new->user);
62345 new->user = new_user;
62346@@ -650,6 +670,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
62347 goto error;
62348 }
62349
62350+ if (gr_check_user_change(new->uid, new->euid, -1))
62351+ goto error;
62352+
62353 if (new->uid != old->uid) {
62354 retval = set_user(new);
62355 if (retval < 0)
62356@@ -694,6 +717,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
62357 old = current_cred();
62358
62359 retval = -EPERM;
62360+
62361+ if (gr_check_crash_uid(uid))
62362+ goto error;
62363+ if (gr_check_user_change(uid, uid, uid))
62364+ goto error;
62365+
62366 if (nsown_capable(CAP_SETUID)) {
62367 new->suid = new->uid = uid;
62368 if (uid != old->uid) {
62369@@ -748,6 +777,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
62370 goto error;
62371 }
62372
62373+ if (gr_check_user_change(ruid, euid, -1))
62374+ goto error;
62375+
62376 if (ruid != (uid_t) -1) {
62377 new->uid = ruid;
62378 if (ruid != old->uid) {
62379@@ -812,6 +844,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
62380 goto error;
62381 }
62382
62383+ if (gr_check_group_change(rgid, egid, -1))
62384+ goto error;
62385+
62386 if (rgid != (gid_t) -1)
62387 new->gid = rgid;
62388 if (egid != (gid_t) -1)
62389@@ -858,6 +893,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
62390 old = current_cred();
62391 old_fsuid = old->fsuid;
62392
62393+ if (gr_check_user_change(-1, -1, uid))
62394+ goto error;
62395+
62396 if (uid == old->uid || uid == old->euid ||
62397 uid == old->suid || uid == old->fsuid ||
62398 nsown_capable(CAP_SETUID)) {
62399@@ -868,6 +906,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
62400 }
62401 }
62402
62403+error:
62404 abort_creds(new);
62405 return old_fsuid;
62406
62407@@ -894,12 +933,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
62408 if (gid == old->gid || gid == old->egid ||
62409 gid == old->sgid || gid == old->fsgid ||
62410 nsown_capable(CAP_SETGID)) {
62411+ if (gr_check_group_change(-1, -1, gid))
62412+ goto error;
62413+
62414 if (gid != old_fsgid) {
62415 new->fsgid = gid;
62416 goto change_okay;
62417 }
62418 }
62419
62420+error:
62421 abort_creds(new);
62422 return old_fsgid;
62423
62424@@ -1680,7 +1723,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
62425 error = get_dumpable(me->mm);
62426 break;
62427 case PR_SET_DUMPABLE:
62428- if (arg2 < 0 || arg2 > 1) {
62429+ if (arg2 > 1) {
62430 error = -EINVAL;
62431 break;
62432 }
62433diff -urNp linux-3.0.4/kernel/sysctl.c linux-3.0.4/kernel/sysctl.c
62434--- linux-3.0.4/kernel/sysctl.c 2011-07-21 22:17:23.000000000 -0400
62435+++ linux-3.0.4/kernel/sysctl.c 2011-08-23 21:48:14.000000000 -0400
62436@@ -85,6 +85,13 @@
62437
62438
62439 #if defined(CONFIG_SYSCTL)
62440+#include <linux/grsecurity.h>
62441+#include <linux/grinternal.h>
62442+
62443+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
62444+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
62445+ const int op);
62446+extern int gr_handle_chroot_sysctl(const int op);
62447
62448 /* External variables not in a header file. */
62449 extern int sysctl_overcommit_memory;
62450@@ -197,6 +204,7 @@ static int sysrq_sysctl_handler(ctl_tabl
62451 }
62452
62453 #endif
62454+extern struct ctl_table grsecurity_table[];
62455
62456 static struct ctl_table root_table[];
62457 static struct ctl_table_root sysctl_table_root;
62458@@ -226,6 +234,20 @@ extern struct ctl_table epoll_table[];
62459 int sysctl_legacy_va_layout;
62460 #endif
62461
62462+#ifdef CONFIG_PAX_SOFTMODE
62463+static ctl_table pax_table[] = {
62464+ {
62465+ .procname = "softmode",
62466+ .data = &pax_softmode,
62467+ .maxlen = sizeof(unsigned int),
62468+ .mode = 0600,
62469+ .proc_handler = &proc_dointvec,
62470+ },
62471+
62472+ { }
62473+};
62474+#endif
62475+
62476 /* The default sysctl tables: */
62477
62478 static struct ctl_table root_table[] = {
62479@@ -272,6 +294,22 @@ static int max_extfrag_threshold = 1000;
62480 #endif
62481
62482 static struct ctl_table kern_table[] = {
62483+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
62484+ {
62485+ .procname = "grsecurity",
62486+ .mode = 0500,
62487+ .child = grsecurity_table,
62488+ },
62489+#endif
62490+
62491+#ifdef CONFIG_PAX_SOFTMODE
62492+ {
62493+ .procname = "pax",
62494+ .mode = 0500,
62495+ .child = pax_table,
62496+ },
62497+#endif
62498+
62499 {
62500 .procname = "sched_child_runs_first",
62501 .data = &sysctl_sched_child_runs_first,
62502@@ -546,7 +584,7 @@ static struct ctl_table kern_table[] = {
62503 .data = &modprobe_path,
62504 .maxlen = KMOD_PATH_LEN,
62505 .mode = 0644,
62506- .proc_handler = proc_dostring,
62507+ .proc_handler = proc_dostring_modpriv,
62508 },
62509 {
62510 .procname = "modules_disabled",
62511@@ -713,16 +751,20 @@ static struct ctl_table kern_table[] = {
62512 .extra1 = &zero,
62513 .extra2 = &one,
62514 },
62515+#endif
62516 {
62517 .procname = "kptr_restrict",
62518 .data = &kptr_restrict,
62519 .maxlen = sizeof(int),
62520 .mode = 0644,
62521 .proc_handler = proc_dmesg_restrict,
62522+#ifdef CONFIG_GRKERNSEC_HIDESYM
62523+ .extra1 = &two,
62524+#else
62525 .extra1 = &zero,
62526+#endif
62527 .extra2 = &two,
62528 },
62529-#endif
62530 {
62531 .procname = "ngroups_max",
62532 .data = &ngroups_max,
62533@@ -1205,6 +1247,13 @@ static struct ctl_table vm_table[] = {
62534 .proc_handler = proc_dointvec_minmax,
62535 .extra1 = &zero,
62536 },
62537+ {
62538+ .procname = "heap_stack_gap",
62539+ .data = &sysctl_heap_stack_gap,
62540+ .maxlen = sizeof(sysctl_heap_stack_gap),
62541+ .mode = 0644,
62542+ .proc_handler = proc_doulongvec_minmax,
62543+ },
62544 #else
62545 {
62546 .procname = "nr_trim_pages",
62547@@ -1714,6 +1763,17 @@ static int test_perm(int mode, int op)
62548 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
62549 {
62550 int mode;
62551+ int error;
62552+
62553+ if (table->parent != NULL && table->parent->procname != NULL &&
62554+ table->procname != NULL &&
62555+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
62556+ return -EACCES;
62557+ if (gr_handle_chroot_sysctl(op))
62558+ return -EACCES;
62559+ error = gr_handle_sysctl(table, op);
62560+ if (error)
62561+ return error;
62562
62563 if (root->permissions)
62564 mode = root->permissions(root, current->nsproxy, table);
62565@@ -2118,6 +2178,16 @@ int proc_dostring(struct ctl_table *tabl
62566 buffer, lenp, ppos);
62567 }
62568
62569+int proc_dostring_modpriv(struct ctl_table *table, int write,
62570+ void __user *buffer, size_t *lenp, loff_t *ppos)
62571+{
62572+ if (write && !capable(CAP_SYS_MODULE))
62573+ return -EPERM;
62574+
62575+ return _proc_do_string(table->data, table->maxlen, write,
62576+ buffer, lenp, ppos);
62577+}
62578+
62579 static size_t proc_skip_spaces(char **buf)
62580 {
62581 size_t ret;
62582@@ -2223,6 +2293,8 @@ static int proc_put_long(void __user **b
62583 len = strlen(tmp);
62584 if (len > *size)
62585 len = *size;
62586+ if (len > sizeof(tmp))
62587+ len = sizeof(tmp);
62588 if (copy_to_user(*buf, tmp, len))
62589 return -EFAULT;
62590 *size -= len;
62591@@ -2539,8 +2611,11 @@ static int __do_proc_doulongvec_minmax(v
62592 *i = val;
62593 } else {
62594 val = convdiv * (*i) / convmul;
62595- if (!first)
62596+ if (!first) {
62597 err = proc_put_char(&buffer, &left, '\t');
62598+ if (err)
62599+ break;
62600+ }
62601 err = proc_put_long(&buffer, &left, val, false);
62602 if (err)
62603 break;
62604@@ -2935,6 +3010,12 @@ int proc_dostring(struct ctl_table *tabl
62605 return -ENOSYS;
62606 }
62607
62608+int proc_dostring_modpriv(struct ctl_table *table, int write,
62609+ void __user *buffer, size_t *lenp, loff_t *ppos)
62610+{
62611+ return -ENOSYS;
62612+}
62613+
62614 int proc_dointvec(struct ctl_table *table, int write,
62615 void __user *buffer, size_t *lenp, loff_t *ppos)
62616 {
62617@@ -2991,6 +3072,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
62618 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
62619 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
62620 EXPORT_SYMBOL(proc_dostring);
62621+EXPORT_SYMBOL(proc_dostring_modpriv);
62622 EXPORT_SYMBOL(proc_doulongvec_minmax);
62623 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
62624 EXPORT_SYMBOL(register_sysctl_table);
62625diff -urNp linux-3.0.4/kernel/sysctl_check.c linux-3.0.4/kernel/sysctl_check.c
62626--- linux-3.0.4/kernel/sysctl_check.c 2011-07-21 22:17:23.000000000 -0400
62627+++ linux-3.0.4/kernel/sysctl_check.c 2011-08-23 21:48:14.000000000 -0400
62628@@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *n
62629 set_fail(&fail, table, "Directory with extra2");
62630 } else {
62631 if ((table->proc_handler == proc_dostring) ||
62632+ (table->proc_handler == proc_dostring_modpriv) ||
62633 (table->proc_handler == proc_dointvec) ||
62634 (table->proc_handler == proc_dointvec_minmax) ||
62635 (table->proc_handler == proc_dointvec_jiffies) ||
62636diff -urNp linux-3.0.4/kernel/taskstats.c linux-3.0.4/kernel/taskstats.c
62637--- linux-3.0.4/kernel/taskstats.c 2011-07-21 22:17:23.000000000 -0400
62638+++ linux-3.0.4/kernel/taskstats.c 2011-08-23 21:48:14.000000000 -0400
62639@@ -27,9 +27,12 @@
62640 #include <linux/cgroup.h>
62641 #include <linux/fs.h>
62642 #include <linux/file.h>
62643+#include <linux/grsecurity.h>
62644 #include <net/genetlink.h>
62645 #include <asm/atomic.h>
62646
62647+extern int gr_is_taskstats_denied(int pid);
62648+
62649 /*
62650 * Maximum length of a cpumask that can be specified in
62651 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
62652@@ -558,6 +561,9 @@ err:
62653
62654 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
62655 {
62656+ if (gr_is_taskstats_denied(current->pid))
62657+ return -EACCES;
62658+
62659 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
62660 return cmd_attr_register_cpumask(info);
62661 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
62662diff -urNp linux-3.0.4/kernel/time/alarmtimer.c linux-3.0.4/kernel/time/alarmtimer.c
62663--- linux-3.0.4/kernel/time/alarmtimer.c 2011-07-21 22:17:23.000000000 -0400
62664+++ linux-3.0.4/kernel/time/alarmtimer.c 2011-08-23 21:47:56.000000000 -0400
62665@@ -685,7 +685,7 @@ static int __init alarmtimer_init(void)
62666 {
62667 int error = 0;
62668 int i;
62669- struct k_clock alarm_clock = {
62670+ static struct k_clock alarm_clock = {
62671 .clock_getres = alarm_clock_getres,
62672 .clock_get = alarm_clock_get,
62673 .timer_create = alarm_timer_create,
62674diff -urNp linux-3.0.4/kernel/time/tick-broadcast.c linux-3.0.4/kernel/time/tick-broadcast.c
62675--- linux-3.0.4/kernel/time/tick-broadcast.c 2011-07-21 22:17:23.000000000 -0400
62676+++ linux-3.0.4/kernel/time/tick-broadcast.c 2011-08-23 21:47:56.000000000 -0400
62677@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct cl
62678 * then clear the broadcast bit.
62679 */
62680 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
62681- int cpu = smp_processor_id();
62682+ cpu = smp_processor_id();
62683
62684 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
62685 tick_broadcast_clear_oneshot(cpu);
62686diff -urNp linux-3.0.4/kernel/time/timekeeping.c linux-3.0.4/kernel/time/timekeeping.c
62687--- linux-3.0.4/kernel/time/timekeeping.c 2011-07-21 22:17:23.000000000 -0400
62688+++ linux-3.0.4/kernel/time/timekeeping.c 2011-08-23 21:48:14.000000000 -0400
62689@@ -14,6 +14,7 @@
62690 #include <linux/init.h>
62691 #include <linux/mm.h>
62692 #include <linux/sched.h>
62693+#include <linux/grsecurity.h>
62694 #include <linux/syscore_ops.h>
62695 #include <linux/clocksource.h>
62696 #include <linux/jiffies.h>
62697@@ -361,6 +362,8 @@ int do_settimeofday(const struct timespe
62698 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
62699 return -EINVAL;
62700
62701+ gr_log_timechange();
62702+
62703 write_seqlock_irqsave(&xtime_lock, flags);
62704
62705 timekeeping_forward_now();
62706diff -urNp linux-3.0.4/kernel/time/timer_list.c linux-3.0.4/kernel/time/timer_list.c
62707--- linux-3.0.4/kernel/time/timer_list.c 2011-07-21 22:17:23.000000000 -0400
62708+++ linux-3.0.4/kernel/time/timer_list.c 2011-08-23 21:48:14.000000000 -0400
62709@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
62710
62711 static void print_name_offset(struct seq_file *m, void *sym)
62712 {
62713+#ifdef CONFIG_GRKERNSEC_HIDESYM
62714+ SEQ_printf(m, "<%p>", NULL);
62715+#else
62716 char symname[KSYM_NAME_LEN];
62717
62718 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
62719 SEQ_printf(m, "<%pK>", sym);
62720 else
62721 SEQ_printf(m, "%s", symname);
62722+#endif
62723 }
62724
62725 static void
62726@@ -112,7 +116,11 @@ next_one:
62727 static void
62728 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
62729 {
62730+#ifdef CONFIG_GRKERNSEC_HIDESYM
62731+ SEQ_printf(m, " .base: %p\n", NULL);
62732+#else
62733 SEQ_printf(m, " .base: %pK\n", base);
62734+#endif
62735 SEQ_printf(m, " .index: %d\n",
62736 base->index);
62737 SEQ_printf(m, " .resolution: %Lu nsecs\n",
62738@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs
62739 {
62740 struct proc_dir_entry *pe;
62741
62742+#ifdef CONFIG_GRKERNSEC_PROC_ADD
62743+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
62744+#else
62745 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
62746+#endif
62747 if (!pe)
62748 return -ENOMEM;
62749 return 0;
62750diff -urNp linux-3.0.4/kernel/time/timer_stats.c linux-3.0.4/kernel/time/timer_stats.c
62751--- linux-3.0.4/kernel/time/timer_stats.c 2011-07-21 22:17:23.000000000 -0400
62752+++ linux-3.0.4/kernel/time/timer_stats.c 2011-08-23 21:48:14.000000000 -0400
62753@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
62754 static unsigned long nr_entries;
62755 static struct entry entries[MAX_ENTRIES];
62756
62757-static atomic_t overflow_count;
62758+static atomic_unchecked_t overflow_count;
62759
62760 /*
62761 * The entries are in a hash-table, for fast lookup:
62762@@ -140,7 +140,7 @@ static void reset_entries(void)
62763 nr_entries = 0;
62764 memset(entries, 0, sizeof(entries));
62765 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
62766- atomic_set(&overflow_count, 0);
62767+ atomic_set_unchecked(&overflow_count, 0);
62768 }
62769
62770 static struct entry *alloc_entry(void)
62771@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
62772 if (likely(entry))
62773 entry->count++;
62774 else
62775- atomic_inc(&overflow_count);
62776+ atomic_inc_unchecked(&overflow_count);
62777
62778 out_unlock:
62779 raw_spin_unlock_irqrestore(lock, flags);
62780@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
62781
62782 static void print_name_offset(struct seq_file *m, unsigned long addr)
62783 {
62784+#ifdef CONFIG_GRKERNSEC_HIDESYM
62785+ seq_printf(m, "<%p>", NULL);
62786+#else
62787 char symname[KSYM_NAME_LEN];
62788
62789 if (lookup_symbol_name(addr, symname) < 0)
62790 seq_printf(m, "<%p>", (void *)addr);
62791 else
62792 seq_printf(m, "%s", symname);
62793+#endif
62794 }
62795
62796 static int tstats_show(struct seq_file *m, void *v)
62797@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
62798
62799 seq_puts(m, "Timer Stats Version: v0.2\n");
62800 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
62801- if (atomic_read(&overflow_count))
62802+ if (atomic_read_unchecked(&overflow_count))
62803 seq_printf(m, "Overflow: %d entries\n",
62804- atomic_read(&overflow_count));
62805+ atomic_read_unchecked(&overflow_count));
62806
62807 for (i = 0; i < nr_entries; i++) {
62808 entry = entries + i;
62809@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi
62810 {
62811 struct proc_dir_entry *pe;
62812
62813+#ifdef CONFIG_GRKERNSEC_PROC_ADD
62814+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
62815+#else
62816 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
62817+#endif
62818 if (!pe)
62819 return -ENOMEM;
62820 return 0;
62821diff -urNp linux-3.0.4/kernel/time.c linux-3.0.4/kernel/time.c
62822--- linux-3.0.4/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
62823+++ linux-3.0.4/kernel/time.c 2011-08-23 21:48:14.000000000 -0400
62824@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct tim
62825 return error;
62826
62827 if (tz) {
62828+ /* we log in do_settimeofday called below, so don't log twice
62829+ */
62830+ if (!tv)
62831+ gr_log_timechange();
62832+
62833 /* SMP safe, global irq locking makes it work. */
62834 sys_tz = *tz;
62835 update_vsyscall_tz();
62836diff -urNp linux-3.0.4/kernel/timer.c linux-3.0.4/kernel/timer.c
62837--- linux-3.0.4/kernel/timer.c 2011-07-21 22:17:23.000000000 -0400
62838+++ linux-3.0.4/kernel/timer.c 2011-08-23 21:47:56.000000000 -0400
62839@@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
62840 /*
62841 * This function runs timers and the timer-tq in bottom half context.
62842 */
62843-static void run_timer_softirq(struct softirq_action *h)
62844+static void run_timer_softirq(void)
62845 {
62846 struct tvec_base *base = __this_cpu_read(tvec_bases);
62847
62848diff -urNp linux-3.0.4/kernel/trace/blktrace.c linux-3.0.4/kernel/trace/blktrace.c
62849--- linux-3.0.4/kernel/trace/blktrace.c 2011-07-21 22:17:23.000000000 -0400
62850+++ linux-3.0.4/kernel/trace/blktrace.c 2011-08-23 21:47:56.000000000 -0400
62851@@ -321,7 +321,7 @@ static ssize_t blk_dropped_read(struct f
62852 struct blk_trace *bt = filp->private_data;
62853 char buf[16];
62854
62855- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
62856+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
62857
62858 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
62859 }
62860@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(str
62861 return 1;
62862
62863 bt = buf->chan->private_data;
62864- atomic_inc(&bt->dropped);
62865+ atomic_inc_unchecked(&bt->dropped);
62866 return 0;
62867 }
62868
62869@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_qu
62870
62871 bt->dir = dir;
62872 bt->dev = dev;
62873- atomic_set(&bt->dropped, 0);
62874+ atomic_set_unchecked(&bt->dropped, 0);
62875
62876 ret = -EIO;
62877 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
62878diff -urNp linux-3.0.4/kernel/trace/ftrace.c linux-3.0.4/kernel/trace/ftrace.c
62879--- linux-3.0.4/kernel/trace/ftrace.c 2011-07-21 22:17:23.000000000 -0400
62880+++ linux-3.0.4/kernel/trace/ftrace.c 2011-08-23 21:47:56.000000000 -0400
62881@@ -1566,12 +1566,17 @@ ftrace_code_disable(struct module *mod,
62882 if (unlikely(ftrace_disabled))
62883 return 0;
62884
62885+ ret = ftrace_arch_code_modify_prepare();
62886+ FTRACE_WARN_ON(ret);
62887+ if (ret)
62888+ return 0;
62889+
62890 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
62891+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
62892 if (ret) {
62893 ftrace_bug(ret, ip);
62894- return 0;
62895 }
62896- return 1;
62897+ return ret ? 0 : 1;
62898 }
62899
62900 /*
62901@@ -2550,7 +2555,7 @@ static void ftrace_free_entry_rcu(struct
62902
62903 int
62904 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
62905- void *data)
62906+ void *data)
62907 {
62908 struct ftrace_func_probe *entry;
62909 struct ftrace_page *pg;
62910diff -urNp linux-3.0.4/kernel/trace/trace.c linux-3.0.4/kernel/trace/trace.c
62911--- linux-3.0.4/kernel/trace/trace.c 2011-07-21 22:17:23.000000000 -0400
62912+++ linux-3.0.4/kernel/trace/trace.c 2011-08-23 21:48:14.000000000 -0400
62913@@ -3339,6 +3339,8 @@ static ssize_t tracing_splice_read_pipe(
62914 size_t rem;
62915 unsigned int i;
62916
62917+ pax_track_stack();
62918+
62919 if (splice_grow_spd(pipe, &spd))
62920 return -ENOMEM;
62921
62922@@ -3822,6 +3824,8 @@ tracing_buffers_splice_read(struct file
62923 int entries, size, i;
62924 size_t ret;
62925
62926+ pax_track_stack();
62927+
62928 if (splice_grow_spd(pipe, &spd))
62929 return -ENOMEM;
62930
62931@@ -3990,10 +3994,9 @@ static const struct file_operations trac
62932 };
62933 #endif
62934
62935-static struct dentry *d_tracer;
62936-
62937 struct dentry *tracing_init_dentry(void)
62938 {
62939+ static struct dentry *d_tracer;
62940 static int once;
62941
62942 if (d_tracer)
62943@@ -4013,10 +4016,9 @@ struct dentry *tracing_init_dentry(void)
62944 return d_tracer;
62945 }
62946
62947-static struct dentry *d_percpu;
62948-
62949 struct dentry *tracing_dentry_percpu(void)
62950 {
62951+ static struct dentry *d_percpu;
62952 static int once;
62953 struct dentry *d_tracer;
62954
62955diff -urNp linux-3.0.4/kernel/trace/trace_events.c linux-3.0.4/kernel/trace/trace_events.c
62956--- linux-3.0.4/kernel/trace/trace_events.c 2011-09-02 18:11:21.000000000 -0400
62957+++ linux-3.0.4/kernel/trace/trace_events.c 2011-08-23 21:47:56.000000000 -0400
62958@@ -1318,10 +1318,6 @@ static LIST_HEAD(ftrace_module_file_list
62959 struct ftrace_module_file_ops {
62960 struct list_head list;
62961 struct module *mod;
62962- struct file_operations id;
62963- struct file_operations enable;
62964- struct file_operations format;
62965- struct file_operations filter;
62966 };
62967
62968 static struct ftrace_module_file_ops *
62969@@ -1342,17 +1338,12 @@ trace_create_file_ops(struct module *mod
62970
62971 file_ops->mod = mod;
62972
62973- file_ops->id = ftrace_event_id_fops;
62974- file_ops->id.owner = mod;
62975-
62976- file_ops->enable = ftrace_enable_fops;
62977- file_ops->enable.owner = mod;
62978-
62979- file_ops->filter = ftrace_event_filter_fops;
62980- file_ops->filter.owner = mod;
62981-
62982- file_ops->format = ftrace_event_format_fops;
62983- file_ops->format.owner = mod;
62984+ pax_open_kernel();
62985+ *(void **)&mod->trace_id.owner = mod;
62986+ *(void **)&mod->trace_enable.owner = mod;
62987+ *(void **)&mod->trace_filter.owner = mod;
62988+ *(void **)&mod->trace_format.owner = mod;
62989+ pax_close_kernel();
62990
62991 list_add(&file_ops->list, &ftrace_module_file_list);
62992
62993@@ -1376,8 +1367,8 @@ static void trace_module_add_events(stru
62994
62995 for_each_event(call, start, end) {
62996 __trace_add_event_call(*call, mod,
62997- &file_ops->id, &file_ops->enable,
62998- &file_ops->filter, &file_ops->format);
62999+ &mod->trace_id, &mod->trace_enable,
63000+ &mod->trace_filter, &mod->trace_format);
63001 }
63002 }
63003
63004diff -urNp linux-3.0.4/kernel/trace/trace_mmiotrace.c linux-3.0.4/kernel/trace/trace_mmiotrace.c
63005--- linux-3.0.4/kernel/trace/trace_mmiotrace.c 2011-07-21 22:17:23.000000000 -0400
63006+++ linux-3.0.4/kernel/trace/trace_mmiotrace.c 2011-08-23 21:47:56.000000000 -0400
63007@@ -24,7 +24,7 @@ struct header_iter {
63008 static struct trace_array *mmio_trace_array;
63009 static bool overrun_detected;
63010 static unsigned long prev_overruns;
63011-static atomic_t dropped_count;
63012+static atomic_unchecked_t dropped_count;
63013
63014 static void mmio_reset_data(struct trace_array *tr)
63015 {
63016@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iter
63017
63018 static unsigned long count_overruns(struct trace_iterator *iter)
63019 {
63020- unsigned long cnt = atomic_xchg(&dropped_count, 0);
63021+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
63022 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
63023
63024 if (over > prev_overruns)
63025@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct
63026 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
63027 sizeof(*entry), 0, pc);
63028 if (!event) {
63029- atomic_inc(&dropped_count);
63030+ atomic_inc_unchecked(&dropped_count);
63031 return;
63032 }
63033 entry = ring_buffer_event_data(event);
63034@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct
63035 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
63036 sizeof(*entry), 0, pc);
63037 if (!event) {
63038- atomic_inc(&dropped_count);
63039+ atomic_inc_unchecked(&dropped_count);
63040 return;
63041 }
63042 entry = ring_buffer_event_data(event);
63043diff -urNp linux-3.0.4/kernel/trace/trace_output.c linux-3.0.4/kernel/trace/trace_output.c
63044--- linux-3.0.4/kernel/trace/trace_output.c 2011-07-21 22:17:23.000000000 -0400
63045+++ linux-3.0.4/kernel/trace/trace_output.c 2011-08-23 21:47:56.000000000 -0400
63046@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s,
63047
63048 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
63049 if (!IS_ERR(p)) {
63050- p = mangle_path(s->buffer + s->len, p, "\n");
63051+ p = mangle_path(s->buffer + s->len, p, "\n\\");
63052 if (p) {
63053 s->len = p - s->buffer;
63054 return 1;
63055diff -urNp linux-3.0.4/kernel/trace/trace_stack.c linux-3.0.4/kernel/trace/trace_stack.c
63056--- linux-3.0.4/kernel/trace/trace_stack.c 2011-07-21 22:17:23.000000000 -0400
63057+++ linux-3.0.4/kernel/trace/trace_stack.c 2011-08-23 21:47:56.000000000 -0400
63058@@ -50,7 +50,7 @@ static inline void check_stack(void)
63059 return;
63060
63061 /* we do not handle interrupt stacks yet */
63062- if (!object_is_on_stack(&this_size))
63063+ if (!object_starts_on_stack(&this_size))
63064 return;
63065
63066 local_irq_save(flags);
63067diff -urNp linux-3.0.4/kernel/trace/trace_workqueue.c linux-3.0.4/kernel/trace/trace_workqueue.c
63068--- linux-3.0.4/kernel/trace/trace_workqueue.c 2011-07-21 22:17:23.000000000 -0400
63069+++ linux-3.0.4/kernel/trace/trace_workqueue.c 2011-08-23 21:47:56.000000000 -0400
63070@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
63071 int cpu;
63072 pid_t pid;
63073 /* Can be inserted from interrupt or user context, need to be atomic */
63074- atomic_t inserted;
63075+ atomic_unchecked_t inserted;
63076 /*
63077 * Don't need to be atomic, works are serialized in a single workqueue thread
63078 * on a single CPU.
63079@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
63080 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
63081 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
63082 if (node->pid == wq_thread->pid) {
63083- atomic_inc(&node->inserted);
63084+ atomic_inc_unchecked(&node->inserted);
63085 goto found;
63086 }
63087 }
63088@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct se
63089 tsk = get_pid_task(pid, PIDTYPE_PID);
63090 if (tsk) {
63091 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
63092- atomic_read(&cws->inserted), cws->executed,
63093+ atomic_read_unchecked(&cws->inserted), cws->executed,
63094 tsk->comm);
63095 put_task_struct(tsk);
63096 }
63097diff -urNp linux-3.0.4/lib/bug.c linux-3.0.4/lib/bug.c
63098--- linux-3.0.4/lib/bug.c 2011-07-21 22:17:23.000000000 -0400
63099+++ linux-3.0.4/lib/bug.c 2011-08-23 21:47:56.000000000 -0400
63100@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned l
63101 return BUG_TRAP_TYPE_NONE;
63102
63103 bug = find_bug(bugaddr);
63104+ if (!bug)
63105+ return BUG_TRAP_TYPE_NONE;
63106
63107 file = NULL;
63108 line = 0;
63109diff -urNp linux-3.0.4/lib/debugobjects.c linux-3.0.4/lib/debugobjects.c
63110--- linux-3.0.4/lib/debugobjects.c 2011-07-21 22:17:23.000000000 -0400
63111+++ linux-3.0.4/lib/debugobjects.c 2011-08-23 21:47:56.000000000 -0400
63112@@ -284,7 +284,7 @@ static void debug_object_is_on_stack(voi
63113 if (limit > 4)
63114 return;
63115
63116- is_on_stack = object_is_on_stack(addr);
63117+ is_on_stack = object_starts_on_stack(addr);
63118 if (is_on_stack == onstack)
63119 return;
63120
63121diff -urNp linux-3.0.4/lib/dma-debug.c linux-3.0.4/lib/dma-debug.c
63122--- linux-3.0.4/lib/dma-debug.c 2011-07-21 22:17:23.000000000 -0400
63123+++ linux-3.0.4/lib/dma-debug.c 2011-08-23 21:47:56.000000000 -0400
63124@@ -870,7 +870,7 @@ out:
63125
63126 static void check_for_stack(struct device *dev, void *addr)
63127 {
63128- if (object_is_on_stack(addr))
63129+ if (object_starts_on_stack(addr))
63130 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
63131 "stack [addr=%p]\n", addr);
63132 }
63133diff -urNp linux-3.0.4/lib/extable.c linux-3.0.4/lib/extable.c
63134--- linux-3.0.4/lib/extable.c 2011-07-21 22:17:23.000000000 -0400
63135+++ linux-3.0.4/lib/extable.c 2011-08-23 21:47:56.000000000 -0400
63136@@ -13,6 +13,7 @@
63137 #include <linux/init.h>
63138 #include <linux/sort.h>
63139 #include <asm/uaccess.h>
63140+#include <asm/pgtable.h>
63141
63142 #ifndef ARCH_HAS_SORT_EXTABLE
63143 /*
63144@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const v
63145 void sort_extable(struct exception_table_entry *start,
63146 struct exception_table_entry *finish)
63147 {
63148+ pax_open_kernel();
63149 sort(start, finish - start, sizeof(struct exception_table_entry),
63150 cmp_ex, NULL);
63151+ pax_close_kernel();
63152 }
63153
63154 #ifdef CONFIG_MODULES
63155diff -urNp linux-3.0.4/lib/inflate.c linux-3.0.4/lib/inflate.c
63156--- linux-3.0.4/lib/inflate.c 2011-07-21 22:17:23.000000000 -0400
63157+++ linux-3.0.4/lib/inflate.c 2011-08-23 21:47:56.000000000 -0400
63158@@ -269,7 +269,7 @@ static void free(void *where)
63159 malloc_ptr = free_mem_ptr;
63160 }
63161 #else
63162-#define malloc(a) kmalloc(a, GFP_KERNEL)
63163+#define malloc(a) kmalloc((a), GFP_KERNEL)
63164 #define free(a) kfree(a)
63165 #endif
63166
63167diff -urNp linux-3.0.4/lib/Kconfig.debug linux-3.0.4/lib/Kconfig.debug
63168--- linux-3.0.4/lib/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
63169+++ linux-3.0.4/lib/Kconfig.debug 2011-08-23 21:48:14.000000000 -0400
63170@@ -1088,6 +1088,7 @@ config LATENCYTOP
63171 depends on DEBUG_KERNEL
63172 depends on STACKTRACE_SUPPORT
63173 depends on PROC_FS
63174+ depends on !GRKERNSEC_HIDESYM
63175 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
63176 select KALLSYMS
63177 select KALLSYMS_ALL
63178diff -urNp linux-3.0.4/lib/kref.c linux-3.0.4/lib/kref.c
63179--- linux-3.0.4/lib/kref.c 2011-07-21 22:17:23.000000000 -0400
63180+++ linux-3.0.4/lib/kref.c 2011-08-23 21:47:56.000000000 -0400
63181@@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
63182 */
63183 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
63184 {
63185- WARN_ON(release == NULL);
63186+ BUG_ON(release == NULL);
63187 WARN_ON(release == (void (*)(struct kref *))kfree);
63188
63189 if (atomic_dec_and_test(&kref->refcount)) {
63190diff -urNp linux-3.0.4/lib/radix-tree.c linux-3.0.4/lib/radix-tree.c
63191--- linux-3.0.4/lib/radix-tree.c 2011-07-21 22:17:23.000000000 -0400
63192+++ linux-3.0.4/lib/radix-tree.c 2011-08-23 21:47:56.000000000 -0400
63193@@ -80,7 +80,7 @@ struct radix_tree_preload {
63194 int nr;
63195 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
63196 };
63197-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
63198+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
63199
63200 static inline void *ptr_to_indirect(void *ptr)
63201 {
63202diff -urNp linux-3.0.4/lib/vsprintf.c linux-3.0.4/lib/vsprintf.c
63203--- linux-3.0.4/lib/vsprintf.c 2011-07-21 22:17:23.000000000 -0400
63204+++ linux-3.0.4/lib/vsprintf.c 2011-08-23 21:48:14.000000000 -0400
63205@@ -16,6 +16,9 @@
63206 * - scnprintf and vscnprintf
63207 */
63208
63209+#ifdef CONFIG_GRKERNSEC_HIDESYM
63210+#define __INCLUDED_BY_HIDESYM 1
63211+#endif
63212 #include <stdarg.h>
63213 #include <linux/module.h>
63214 #include <linux/types.h>
63215@@ -435,7 +438,7 @@ char *symbol_string(char *buf, char *end
63216 char sym[KSYM_SYMBOL_LEN];
63217 if (ext == 'B')
63218 sprint_backtrace(sym, value);
63219- else if (ext != 'f' && ext != 's')
63220+ else if (ext != 'f' && ext != 's' && ext != 'a')
63221 sprint_symbol(sym, value);
63222 else
63223 kallsyms_lookup(value, NULL, NULL, NULL, sym);
63224@@ -799,7 +802,11 @@ char *uuid_string(char *buf, char *end,
63225 return string(buf, end, uuid, spec);
63226 }
63227
63228+#ifdef CONFIG_GRKERNSEC_HIDESYM
63229+int kptr_restrict __read_mostly = 2;
63230+#else
63231 int kptr_restrict __read_mostly;
63232+#endif
63233
63234 /*
63235 * Show a '%p' thing. A kernel extension is that the '%p' is followed
63236@@ -813,6 +820,8 @@ int kptr_restrict __read_mostly;
63237 * - 'S' For symbolic direct pointers with offset
63238 * - 's' For symbolic direct pointers without offset
63239 * - 'B' For backtraced symbolic direct pointers with offset
63240+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
63241+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
63242 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
63243 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
63244 * - 'M' For a 6-byte MAC address, it prints the address in the
63245@@ -857,12 +866,12 @@ char *pointer(const char *fmt, char *buf
63246 {
63247 if (!ptr && *fmt != 'K') {
63248 /*
63249- * Print (null) with the same width as a pointer so it makes
63250+ * Print (nil) with the same width as a pointer so it makes
63251 * tabular output look nice.
63252 */
63253 if (spec.field_width == -1)
63254 spec.field_width = 2 * sizeof(void *);
63255- return string(buf, end, "(null)", spec);
63256+ return string(buf, end, "(nil)", spec);
63257 }
63258
63259 switch (*fmt) {
63260@@ -872,6 +881,13 @@ char *pointer(const char *fmt, char *buf
63261 /* Fallthrough */
63262 case 'S':
63263 case 's':
63264+#ifdef CONFIG_GRKERNSEC_HIDESYM
63265+ break;
63266+#else
63267+ return symbol_string(buf, end, ptr, spec, *fmt);
63268+#endif
63269+ case 'A':
63270+ case 'a':
63271 case 'B':
63272 return symbol_string(buf, end, ptr, spec, *fmt);
63273 case 'R':
63274@@ -1631,11 +1647,11 @@ int bstr_printf(char *buf, size_t size,
63275 typeof(type) value; \
63276 if (sizeof(type) == 8) { \
63277 args = PTR_ALIGN(args, sizeof(u32)); \
63278- *(u32 *)&value = *(u32 *)args; \
63279- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
63280+ *(u32 *)&value = *(const u32 *)args; \
63281+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
63282 } else { \
63283 args = PTR_ALIGN(args, sizeof(type)); \
63284- value = *(typeof(type) *)args; \
63285+ value = *(const typeof(type) *)args; \
63286 } \
63287 args += sizeof(type); \
63288 value; \
63289@@ -1698,7 +1714,7 @@ int bstr_printf(char *buf, size_t size,
63290 case FORMAT_TYPE_STR: {
63291 const char *str_arg = args;
63292 args += strlen(str_arg) + 1;
63293- str = string(str, end, (char *)str_arg, spec);
63294+ str = string(str, end, str_arg, spec);
63295 break;
63296 }
63297
63298diff -urNp linux-3.0.4/localversion-grsec linux-3.0.4/localversion-grsec
63299--- linux-3.0.4/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
63300+++ linux-3.0.4/localversion-grsec 2011-08-23 21:48:14.000000000 -0400
63301@@ -0,0 +1 @@
63302+-grsec
63303diff -urNp linux-3.0.4/Makefile linux-3.0.4/Makefile
63304--- linux-3.0.4/Makefile 2011-09-02 18:11:26.000000000 -0400
63305+++ linux-3.0.4/Makefile 2011-09-17 00:56:07.000000000 -0400
63306@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
63307
63308 HOSTCC = gcc
63309 HOSTCXX = g++
63310-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
63311-HOSTCXXFLAGS = -O2
63312+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
63313+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
63314+HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
63315
63316 # Decide whether to build built-in, modular, or both.
63317 # Normally, just do built-in.
63318@@ -365,10 +366,12 @@ LINUXINCLUDE := -I$(srctree)/arch/$(h
63319 KBUILD_CPPFLAGS := -D__KERNEL__
63320
63321 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
63322+ -W -Wno-unused-parameter -Wno-missing-field-initializers \
63323 -fno-strict-aliasing -fno-common \
63324 -Werror-implicit-function-declaration \
63325 -Wno-format-security \
63326 -fno-delete-null-pointer-checks
63327+KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
63328 KBUILD_AFLAGS_KERNEL :=
63329 KBUILD_CFLAGS_KERNEL :=
63330 KBUILD_AFLAGS := -D__ASSEMBLY__
63331@@ -407,8 +410,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
63332 # Rules shared between *config targets and build targets
63333
63334 # Basic helpers built in scripts/
63335-PHONY += scripts_basic
63336-scripts_basic:
63337+PHONY += scripts_basic gcc-plugins
63338+scripts_basic: gcc-plugins
63339 $(Q)$(MAKE) $(build)=scripts/basic
63340 $(Q)rm -f .tmp_quiet_recordmcount
63341
63342@@ -564,6 +567,31 @@ else
63343 KBUILD_CFLAGS += -O2
63344 endif
63345
63346+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
63347+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
63348+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
63349+KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
63350+endif
63351+ifdef CONFIG_KALLOCSTAT_PLUGIN
63352+KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
63353+endif
63354+ifdef CONFIG_PAX_MEMORY_STACKLEAK
63355+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
63356+endif
63357+GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN)
63358+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN
63359+gcc-plugins:
63360+ $(Q)$(MAKE) $(build)=tools/gcc
63361+else
63362+gcc-plugins:
63363+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
63364+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.))
63365+else
63366+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
63367+endif
63368+ $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
63369+endif
63370+
63371 include $(srctree)/arch/$(SRCARCH)/Makefile
63372
63373 ifneq ($(CONFIG_FRAME_WARN),0)
63374@@ -708,7 +736,7 @@ export mod_strip_cmd
63375
63376
63377 ifeq ($(KBUILD_EXTMOD),)
63378-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
63379+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
63380
63381 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
63382 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
63383@@ -907,6 +935,8 @@ define rule_vmlinux-modpost
63384 endef
63385
63386 # vmlinux image - including updated kernel symbols
63387+$(vmlinux-all): KBUILD_CFLAGS += $(GCC_PLUGINS)
63388+$(vmlinux-all): gcc-plugins
63389 vmlinux: $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o $(kallsyms.o) FORCE
63390 ifdef CONFIG_HEADERS_CHECK
63391 $(Q)$(MAKE) -f $(srctree)/Makefile headers_check
63392@@ -941,7 +971,8 @@ $(sort $(vmlinux-init) $(vmlinux-main))
63393 # Error messages still appears in the original language
63394
63395 PHONY += $(vmlinux-dirs)
63396-$(vmlinux-dirs): prepare scripts
63397+$(vmlinux-dirs): KBUILD_CFLAGS += $(GCC_PLUGINS)
63398+$(vmlinux-dirs): gcc-plugins prepare scripts
63399 $(Q)$(MAKE) $(build)=$@
63400
63401 # Store (new) KERNELRELASE string in include/config/kernel.release
63402@@ -986,6 +1017,7 @@ prepare0: archprepare FORCE
63403 $(Q)$(MAKE) $(build)=. missing-syscalls
63404
63405 # All the preparing..
63406+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
63407 prepare: prepare0
63408
63409 # Generate some files
63410@@ -1102,7 +1134,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modu
63411
63412 # Target to prepare building external modules
63413 PHONY += modules_prepare
63414-modules_prepare: prepare scripts
63415+modules_prepare: gcc-plugins prepare scripts
63416
63417 # Target to install modules
63418 PHONY += modules_install
63419@@ -1198,7 +1230,7 @@ distclean: mrproper
63420 @find $(srctree) $(RCS_FIND_IGNORE) \
63421 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
63422 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
63423- -o -name '.*.rej' -o -size 0 \
63424+ -o -name '.*.rej' -o -name '*.so' -o -size 0 \
63425 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
63426 -type f -print | xargs rm -f
63427
63428@@ -1359,6 +1391,7 @@ PHONY += $(module-dirs) modules
63429 $(module-dirs): crmodverdir $(objtree)/Module.symvers
63430 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
63431
63432+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
63433 modules: $(module-dirs)
63434 @$(kecho) ' Building modules, stage 2.';
63435 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
63436@@ -1485,17 +1518,19 @@ else
63437 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
63438 endif
63439
63440-%.s: %.c prepare scripts FORCE
63441+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
63442+%.s: %.c gcc-plugins prepare scripts FORCE
63443 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
63444 %.i: %.c prepare scripts FORCE
63445 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
63446-%.o: %.c prepare scripts FORCE
63447+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
63448+%.o: %.c gcc-plugins prepare scripts FORCE
63449 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
63450 %.lst: %.c prepare scripts FORCE
63451 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
63452-%.s: %.S prepare scripts FORCE
63453+%.s: %.S gcc-plugins prepare scripts FORCE
63454 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
63455-%.o: %.S prepare scripts FORCE
63456+%.o: %.S gcc-plugins prepare scripts FORCE
63457 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
63458 %.symtypes: %.c prepare scripts FORCE
63459 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
63460@@ -1505,11 +1540,13 @@ endif
63461 $(cmd_crmodverdir)
63462 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
63463 $(build)=$(build-dir)
63464-%/: prepare scripts FORCE
63465+%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
63466+%/: gcc-plugins prepare scripts FORCE
63467 $(cmd_crmodverdir)
63468 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
63469 $(build)=$(build-dir)
63470-%.ko: prepare scripts FORCE
63471+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
63472+%.ko: gcc-plugins prepare scripts FORCE
63473 $(cmd_crmodverdir)
63474 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
63475 $(build)=$(build-dir) $(@:.ko=.o)
63476diff -urNp linux-3.0.4/mm/filemap.c linux-3.0.4/mm/filemap.c
63477--- linux-3.0.4/mm/filemap.c 2011-07-21 22:17:23.000000000 -0400
63478+++ linux-3.0.4/mm/filemap.c 2011-08-23 21:48:14.000000000 -0400
63479@@ -1763,7 +1763,7 @@ int generic_file_mmap(struct file * file
63480 struct address_space *mapping = file->f_mapping;
63481
63482 if (!mapping->a_ops->readpage)
63483- return -ENOEXEC;
63484+ return -ENODEV;
63485 file_accessed(file);
63486 vma->vm_ops = &generic_file_vm_ops;
63487 vma->vm_flags |= VM_CAN_NONLINEAR;
63488@@ -2169,6 +2169,7 @@ inline int generic_write_checks(struct f
63489 *pos = i_size_read(inode);
63490
63491 if (limit != RLIM_INFINITY) {
63492+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
63493 if (*pos >= limit) {
63494 send_sig(SIGXFSZ, current, 0);
63495 return -EFBIG;
63496diff -urNp linux-3.0.4/mm/fremap.c linux-3.0.4/mm/fremap.c
63497--- linux-3.0.4/mm/fremap.c 2011-07-21 22:17:23.000000000 -0400
63498+++ linux-3.0.4/mm/fremap.c 2011-08-23 21:47:56.000000000 -0400
63499@@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
63500 retry:
63501 vma = find_vma(mm, start);
63502
63503+#ifdef CONFIG_PAX_SEGMEXEC
63504+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
63505+ goto out;
63506+#endif
63507+
63508 /*
63509 * Make sure the vma is shared, that it supports prefaulting,
63510 * and that the remapped range is valid and fully within
63511diff -urNp linux-3.0.4/mm/highmem.c linux-3.0.4/mm/highmem.c
63512--- linux-3.0.4/mm/highmem.c 2011-07-21 22:17:23.000000000 -0400
63513+++ linux-3.0.4/mm/highmem.c 2011-08-23 21:47:56.000000000 -0400
63514@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
63515 * So no dangers, even with speculative execution.
63516 */
63517 page = pte_page(pkmap_page_table[i]);
63518+ pax_open_kernel();
63519 pte_clear(&init_mm, (unsigned long)page_address(page),
63520 &pkmap_page_table[i]);
63521-
63522+ pax_close_kernel();
63523 set_page_address(page, NULL);
63524 need_flush = 1;
63525 }
63526@@ -186,9 +187,11 @@ start:
63527 }
63528 }
63529 vaddr = PKMAP_ADDR(last_pkmap_nr);
63530+
63531+ pax_open_kernel();
63532 set_pte_at(&init_mm, vaddr,
63533 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
63534-
63535+ pax_close_kernel();
63536 pkmap_count[last_pkmap_nr] = 1;
63537 set_page_address(page, (void *)vaddr);
63538
63539diff -urNp linux-3.0.4/mm/huge_memory.c linux-3.0.4/mm/huge_memory.c
63540--- linux-3.0.4/mm/huge_memory.c 2011-07-21 22:17:23.000000000 -0400
63541+++ linux-3.0.4/mm/huge_memory.c 2011-08-23 21:47:56.000000000 -0400
63542@@ -702,7 +702,7 @@ out:
63543 * run pte_offset_map on the pmd, if an huge pmd could
63544 * materialize from under us from a different thread.
63545 */
63546- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
63547+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
63548 return VM_FAULT_OOM;
63549 /* if an huge pmd materialized from under us just retry later */
63550 if (unlikely(pmd_trans_huge(*pmd)))
63551diff -urNp linux-3.0.4/mm/hugetlb.c linux-3.0.4/mm/hugetlb.c
63552--- linux-3.0.4/mm/hugetlb.c 2011-07-21 22:17:23.000000000 -0400
63553+++ linux-3.0.4/mm/hugetlb.c 2011-08-23 21:47:56.000000000 -0400
63554@@ -2339,6 +2339,27 @@ static int unmap_ref_private(struct mm_s
63555 return 1;
63556 }
63557
63558+#ifdef CONFIG_PAX_SEGMEXEC
63559+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
63560+{
63561+ struct mm_struct *mm = vma->vm_mm;
63562+ struct vm_area_struct *vma_m;
63563+ unsigned long address_m;
63564+ pte_t *ptep_m;
63565+
63566+ vma_m = pax_find_mirror_vma(vma);
63567+ if (!vma_m)
63568+ return;
63569+
63570+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63571+ address_m = address + SEGMEXEC_TASK_SIZE;
63572+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
63573+ get_page(page_m);
63574+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
63575+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
63576+}
63577+#endif
63578+
63579 /*
63580 * Hugetlb_cow() should be called with page lock of the original hugepage held.
63581 */
63582@@ -2440,6 +2461,11 @@ retry_avoidcopy:
63583 make_huge_pte(vma, new_page, 1));
63584 page_remove_rmap(old_page);
63585 hugepage_add_new_anon_rmap(new_page, vma, address);
63586+
63587+#ifdef CONFIG_PAX_SEGMEXEC
63588+ pax_mirror_huge_pte(vma, address, new_page);
63589+#endif
63590+
63591 /* Make the old page be freed below */
63592 new_page = old_page;
63593 mmu_notifier_invalidate_range_end(mm,
63594@@ -2591,6 +2617,10 @@ retry:
63595 && (vma->vm_flags & VM_SHARED)));
63596 set_huge_pte_at(mm, address, ptep, new_pte);
63597
63598+#ifdef CONFIG_PAX_SEGMEXEC
63599+ pax_mirror_huge_pte(vma, address, page);
63600+#endif
63601+
63602 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
63603 /* Optimization, do the COW without a second fault */
63604 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
63605@@ -2620,6 +2650,10 @@ int hugetlb_fault(struct mm_struct *mm,
63606 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
63607 struct hstate *h = hstate_vma(vma);
63608
63609+#ifdef CONFIG_PAX_SEGMEXEC
63610+ struct vm_area_struct *vma_m;
63611+#endif
63612+
63613 ptep = huge_pte_offset(mm, address);
63614 if (ptep) {
63615 entry = huge_ptep_get(ptep);
63616@@ -2631,6 +2665,26 @@ int hugetlb_fault(struct mm_struct *mm,
63617 VM_FAULT_SET_HINDEX(h - hstates);
63618 }
63619
63620+#ifdef CONFIG_PAX_SEGMEXEC
63621+ vma_m = pax_find_mirror_vma(vma);
63622+ if (vma_m) {
63623+ unsigned long address_m;
63624+
63625+ if (vma->vm_start > vma_m->vm_start) {
63626+ address_m = address;
63627+ address -= SEGMEXEC_TASK_SIZE;
63628+ vma = vma_m;
63629+ h = hstate_vma(vma);
63630+ } else
63631+ address_m = address + SEGMEXEC_TASK_SIZE;
63632+
63633+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
63634+ return VM_FAULT_OOM;
63635+ address_m &= HPAGE_MASK;
63636+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
63637+ }
63638+#endif
63639+
63640 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
63641 if (!ptep)
63642 return VM_FAULT_OOM;
63643diff -urNp linux-3.0.4/mm/internal.h linux-3.0.4/mm/internal.h
63644--- linux-3.0.4/mm/internal.h 2011-07-21 22:17:23.000000000 -0400
63645+++ linux-3.0.4/mm/internal.h 2011-08-23 21:47:56.000000000 -0400
63646@@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
63647 * in mm/page_alloc.c
63648 */
63649 extern void __free_pages_bootmem(struct page *page, unsigned int order);
63650+extern void free_compound_page(struct page *page);
63651 extern void prep_compound_page(struct page *page, unsigned long order);
63652 #ifdef CONFIG_MEMORY_FAILURE
63653 extern bool is_free_buddy_page(struct page *page);
63654diff -urNp linux-3.0.4/mm/Kconfig linux-3.0.4/mm/Kconfig
63655--- linux-3.0.4/mm/Kconfig 2011-07-21 22:17:23.000000000 -0400
63656+++ linux-3.0.4/mm/Kconfig 2011-08-23 21:48:14.000000000 -0400
63657@@ -240,7 +240,7 @@ config KSM
63658 config DEFAULT_MMAP_MIN_ADDR
63659 int "Low address space to protect from user allocation"
63660 depends on MMU
63661- default 4096
63662+ default 65536
63663 help
63664 This is the portion of low virtual memory which should be protected
63665 from userspace allocation. Keeping a user from writing to low pages
63666diff -urNp linux-3.0.4/mm/kmemleak.c linux-3.0.4/mm/kmemleak.c
63667--- linux-3.0.4/mm/kmemleak.c 2011-07-21 22:17:23.000000000 -0400
63668+++ linux-3.0.4/mm/kmemleak.c 2011-08-23 21:48:14.000000000 -0400
63669@@ -357,7 +357,7 @@ static void print_unreferenced(struct se
63670
63671 for (i = 0; i < object->trace_len; i++) {
63672 void *ptr = (void *)object->trace[i];
63673- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
63674+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
63675 }
63676 }
63677
63678diff -urNp linux-3.0.4/mm/madvise.c linux-3.0.4/mm/madvise.c
63679--- linux-3.0.4/mm/madvise.c 2011-07-21 22:17:23.000000000 -0400
63680+++ linux-3.0.4/mm/madvise.c 2011-08-23 21:47:56.000000000 -0400
63681@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
63682 pgoff_t pgoff;
63683 unsigned long new_flags = vma->vm_flags;
63684
63685+#ifdef CONFIG_PAX_SEGMEXEC
63686+ struct vm_area_struct *vma_m;
63687+#endif
63688+
63689 switch (behavior) {
63690 case MADV_NORMAL:
63691 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
63692@@ -110,6 +114,13 @@ success:
63693 /*
63694 * vm_flags is protected by the mmap_sem held in write mode.
63695 */
63696+
63697+#ifdef CONFIG_PAX_SEGMEXEC
63698+ vma_m = pax_find_mirror_vma(vma);
63699+ if (vma_m)
63700+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
63701+#endif
63702+
63703 vma->vm_flags = new_flags;
63704
63705 out:
63706@@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_a
63707 struct vm_area_struct ** prev,
63708 unsigned long start, unsigned long end)
63709 {
63710+
63711+#ifdef CONFIG_PAX_SEGMEXEC
63712+ struct vm_area_struct *vma_m;
63713+#endif
63714+
63715 *prev = vma;
63716 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
63717 return -EINVAL;
63718@@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_a
63719 zap_page_range(vma, start, end - start, &details);
63720 } else
63721 zap_page_range(vma, start, end - start, NULL);
63722+
63723+#ifdef CONFIG_PAX_SEGMEXEC
63724+ vma_m = pax_find_mirror_vma(vma);
63725+ if (vma_m) {
63726+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
63727+ struct zap_details details = {
63728+ .nonlinear_vma = vma_m,
63729+ .last_index = ULONG_MAX,
63730+ };
63731+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
63732+ } else
63733+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
63734+ }
63735+#endif
63736+
63737 return 0;
63738 }
63739
63740@@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
63741 if (end < start)
63742 goto out;
63743
63744+#ifdef CONFIG_PAX_SEGMEXEC
63745+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
63746+ if (end > SEGMEXEC_TASK_SIZE)
63747+ goto out;
63748+ } else
63749+#endif
63750+
63751+ if (end > TASK_SIZE)
63752+ goto out;
63753+
63754 error = 0;
63755 if (end == start)
63756 goto out;
63757diff -urNp linux-3.0.4/mm/memory.c linux-3.0.4/mm/memory.c
63758--- linux-3.0.4/mm/memory.c 2011-09-02 18:11:21.000000000 -0400
63759+++ linux-3.0.4/mm/memory.c 2011-08-23 21:47:56.000000000 -0400
63760@@ -457,8 +457,12 @@ static inline void free_pmd_range(struct
63761 return;
63762
63763 pmd = pmd_offset(pud, start);
63764+
63765+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
63766 pud_clear(pud);
63767 pmd_free_tlb(tlb, pmd, start);
63768+#endif
63769+
63770 }
63771
63772 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
63773@@ -489,9 +493,12 @@ static inline void free_pud_range(struct
63774 if (end - 1 > ceiling - 1)
63775 return;
63776
63777+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
63778 pud = pud_offset(pgd, start);
63779 pgd_clear(pgd);
63780 pud_free_tlb(tlb, pud, start);
63781+#endif
63782+
63783 }
63784
63785 /*
63786@@ -1577,12 +1584,6 @@ no_page_table:
63787 return page;
63788 }
63789
63790-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
63791-{
63792- return stack_guard_page_start(vma, addr) ||
63793- stack_guard_page_end(vma, addr+PAGE_SIZE);
63794-}
63795-
63796 /**
63797 * __get_user_pages() - pin user pages in memory
63798 * @tsk: task_struct of target task
63799@@ -1655,10 +1656,10 @@ int __get_user_pages(struct task_struct
63800 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
63801 i = 0;
63802
63803- do {
63804+ while (nr_pages) {
63805 struct vm_area_struct *vma;
63806
63807- vma = find_extend_vma(mm, start);
63808+ vma = find_vma(mm, start);
63809 if (!vma && in_gate_area(mm, start)) {
63810 unsigned long pg = start & PAGE_MASK;
63811 pgd_t *pgd;
63812@@ -1706,7 +1707,7 @@ int __get_user_pages(struct task_struct
63813 goto next_page;
63814 }
63815
63816- if (!vma ||
63817+ if (!vma || start < vma->vm_start ||
63818 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
63819 !(vm_flags & vma->vm_flags))
63820 return i ? : -EFAULT;
63821@@ -1733,11 +1734,6 @@ int __get_user_pages(struct task_struct
63822 int ret;
63823 unsigned int fault_flags = 0;
63824
63825- /* For mlock, just skip the stack guard page. */
63826- if (foll_flags & FOLL_MLOCK) {
63827- if (stack_guard_page(vma, start))
63828- goto next_page;
63829- }
63830 if (foll_flags & FOLL_WRITE)
63831 fault_flags |= FAULT_FLAG_WRITE;
63832 if (nonblocking)
63833@@ -1811,7 +1807,7 @@ next_page:
63834 start += PAGE_SIZE;
63835 nr_pages--;
63836 } while (nr_pages && start < vma->vm_end);
63837- } while (nr_pages);
63838+ }
63839 return i;
63840 }
63841 EXPORT_SYMBOL(__get_user_pages);
63842@@ -2018,6 +2014,10 @@ static int insert_page(struct vm_area_st
63843 page_add_file_rmap(page);
63844 set_pte_at(mm, addr, pte, mk_pte(page, prot));
63845
63846+#ifdef CONFIG_PAX_SEGMEXEC
63847+ pax_mirror_file_pte(vma, addr, page, ptl);
63848+#endif
63849+
63850 retval = 0;
63851 pte_unmap_unlock(pte, ptl);
63852 return retval;
63853@@ -2052,10 +2052,22 @@ out:
63854 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
63855 struct page *page)
63856 {
63857+
63858+#ifdef CONFIG_PAX_SEGMEXEC
63859+ struct vm_area_struct *vma_m;
63860+#endif
63861+
63862 if (addr < vma->vm_start || addr >= vma->vm_end)
63863 return -EFAULT;
63864 if (!page_count(page))
63865 return -EINVAL;
63866+
63867+#ifdef CONFIG_PAX_SEGMEXEC
63868+ vma_m = pax_find_mirror_vma(vma);
63869+ if (vma_m)
63870+ vma_m->vm_flags |= VM_INSERTPAGE;
63871+#endif
63872+
63873 vma->vm_flags |= VM_INSERTPAGE;
63874 return insert_page(vma, addr, page, vma->vm_page_prot);
63875 }
63876@@ -2141,6 +2153,7 @@ int vm_insert_mixed(struct vm_area_struc
63877 unsigned long pfn)
63878 {
63879 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
63880+ BUG_ON(vma->vm_mirror);
63881
63882 if (addr < vma->vm_start || addr >= vma->vm_end)
63883 return -EFAULT;
63884@@ -2456,6 +2469,186 @@ static inline void cow_user_page(struct
63885 copy_user_highpage(dst, src, va, vma);
63886 }
63887
63888+#ifdef CONFIG_PAX_SEGMEXEC
63889+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
63890+{
63891+ struct mm_struct *mm = vma->vm_mm;
63892+ spinlock_t *ptl;
63893+ pte_t *pte, entry;
63894+
63895+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
63896+ entry = *pte;
63897+ if (!pte_present(entry)) {
63898+ if (!pte_none(entry)) {
63899+ BUG_ON(pte_file(entry));
63900+ free_swap_and_cache(pte_to_swp_entry(entry));
63901+ pte_clear_not_present_full(mm, address, pte, 0);
63902+ }
63903+ } else {
63904+ struct page *page;
63905+
63906+ flush_cache_page(vma, address, pte_pfn(entry));
63907+ entry = ptep_clear_flush(vma, address, pte);
63908+ BUG_ON(pte_dirty(entry));
63909+ page = vm_normal_page(vma, address, entry);
63910+ if (page) {
63911+ update_hiwater_rss(mm);
63912+ if (PageAnon(page))
63913+ dec_mm_counter_fast(mm, MM_ANONPAGES);
63914+ else
63915+ dec_mm_counter_fast(mm, MM_FILEPAGES);
63916+ page_remove_rmap(page);
63917+ page_cache_release(page);
63918+ }
63919+ }
63920+ pte_unmap_unlock(pte, ptl);
63921+}
63922+
63923+/* PaX: if vma is mirrored, synchronize the mirror's PTE
63924+ *
63925+ * the ptl of the lower mapped page is held on entry and is not released on exit
63926+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
63927+ */
63928+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
63929+{
63930+ struct mm_struct *mm = vma->vm_mm;
63931+ unsigned long address_m;
63932+ spinlock_t *ptl_m;
63933+ struct vm_area_struct *vma_m;
63934+ pmd_t *pmd_m;
63935+ pte_t *pte_m, entry_m;
63936+
63937+ BUG_ON(!page_m || !PageAnon(page_m));
63938+
63939+ vma_m = pax_find_mirror_vma(vma);
63940+ if (!vma_m)
63941+ return;
63942+
63943+ BUG_ON(!PageLocked(page_m));
63944+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63945+ address_m = address + SEGMEXEC_TASK_SIZE;
63946+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
63947+ pte_m = pte_offset_map(pmd_m, address_m);
63948+ ptl_m = pte_lockptr(mm, pmd_m);
63949+ if (ptl != ptl_m) {
63950+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
63951+ if (!pte_none(*pte_m))
63952+ goto out;
63953+ }
63954+
63955+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
63956+ page_cache_get(page_m);
63957+ page_add_anon_rmap(page_m, vma_m, address_m);
63958+ inc_mm_counter_fast(mm, MM_ANONPAGES);
63959+ set_pte_at(mm, address_m, pte_m, entry_m);
63960+ update_mmu_cache(vma_m, address_m, entry_m);
63961+out:
63962+ if (ptl != ptl_m)
63963+ spin_unlock(ptl_m);
63964+ pte_unmap(pte_m);
63965+ unlock_page(page_m);
63966+}
63967+
63968+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
63969+{
63970+ struct mm_struct *mm = vma->vm_mm;
63971+ unsigned long address_m;
63972+ spinlock_t *ptl_m;
63973+ struct vm_area_struct *vma_m;
63974+ pmd_t *pmd_m;
63975+ pte_t *pte_m, entry_m;
63976+
63977+ BUG_ON(!page_m || PageAnon(page_m));
63978+
63979+ vma_m = pax_find_mirror_vma(vma);
63980+ if (!vma_m)
63981+ return;
63982+
63983+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63984+ address_m = address + SEGMEXEC_TASK_SIZE;
63985+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
63986+ pte_m = pte_offset_map(pmd_m, address_m);
63987+ ptl_m = pte_lockptr(mm, pmd_m);
63988+ if (ptl != ptl_m) {
63989+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
63990+ if (!pte_none(*pte_m))
63991+ goto out;
63992+ }
63993+
63994+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
63995+ page_cache_get(page_m);
63996+ page_add_file_rmap(page_m);
63997+ inc_mm_counter_fast(mm, MM_FILEPAGES);
63998+ set_pte_at(mm, address_m, pte_m, entry_m);
63999+ update_mmu_cache(vma_m, address_m, entry_m);
64000+out:
64001+ if (ptl != ptl_m)
64002+ spin_unlock(ptl_m);
64003+ pte_unmap(pte_m);
64004+}
64005+
64006+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
64007+{
64008+ struct mm_struct *mm = vma->vm_mm;
64009+ unsigned long address_m;
64010+ spinlock_t *ptl_m;
64011+ struct vm_area_struct *vma_m;
64012+ pmd_t *pmd_m;
64013+ pte_t *pte_m, entry_m;
64014+
64015+ vma_m = pax_find_mirror_vma(vma);
64016+ if (!vma_m)
64017+ return;
64018+
64019+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
64020+ address_m = address + SEGMEXEC_TASK_SIZE;
64021+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
64022+ pte_m = pte_offset_map(pmd_m, address_m);
64023+ ptl_m = pte_lockptr(mm, pmd_m);
64024+ if (ptl != ptl_m) {
64025+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
64026+ if (!pte_none(*pte_m))
64027+ goto out;
64028+ }
64029+
64030+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
64031+ set_pte_at(mm, address_m, pte_m, entry_m);
64032+out:
64033+ if (ptl != ptl_m)
64034+ spin_unlock(ptl_m);
64035+ pte_unmap(pte_m);
64036+}
64037+
64038+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
64039+{
64040+ struct page *page_m;
64041+ pte_t entry;
64042+
64043+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
64044+ goto out;
64045+
64046+ entry = *pte;
64047+ page_m = vm_normal_page(vma, address, entry);
64048+ if (!page_m)
64049+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
64050+ else if (PageAnon(page_m)) {
64051+ if (pax_find_mirror_vma(vma)) {
64052+ pte_unmap_unlock(pte, ptl);
64053+ lock_page(page_m);
64054+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
64055+ if (pte_same(entry, *pte))
64056+ pax_mirror_anon_pte(vma, address, page_m, ptl);
64057+ else
64058+ unlock_page(page_m);
64059+ }
64060+ } else
64061+ pax_mirror_file_pte(vma, address, page_m, ptl);
64062+
64063+out:
64064+ pte_unmap_unlock(pte, ptl);
64065+}
64066+#endif
64067+
64068 /*
64069 * This routine handles present pages, when users try to write
64070 * to a shared page. It is done by copying the page to a new address
64071@@ -2667,6 +2860,12 @@ gotten:
64072 */
64073 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
64074 if (likely(pte_same(*page_table, orig_pte))) {
64075+
64076+#ifdef CONFIG_PAX_SEGMEXEC
64077+ if (pax_find_mirror_vma(vma))
64078+ BUG_ON(!trylock_page(new_page));
64079+#endif
64080+
64081 if (old_page) {
64082 if (!PageAnon(old_page)) {
64083 dec_mm_counter_fast(mm, MM_FILEPAGES);
64084@@ -2718,6 +2917,10 @@ gotten:
64085 page_remove_rmap(old_page);
64086 }
64087
64088+#ifdef CONFIG_PAX_SEGMEXEC
64089+ pax_mirror_anon_pte(vma, address, new_page, ptl);
64090+#endif
64091+
64092 /* Free the old page.. */
64093 new_page = old_page;
64094 ret |= VM_FAULT_WRITE;
64095@@ -2997,6 +3200,11 @@ static int do_swap_page(struct mm_struct
64096 swap_free(entry);
64097 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
64098 try_to_free_swap(page);
64099+
64100+#ifdef CONFIG_PAX_SEGMEXEC
64101+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
64102+#endif
64103+
64104 unlock_page(page);
64105 if (swapcache) {
64106 /*
64107@@ -3020,6 +3228,11 @@ static int do_swap_page(struct mm_struct
64108
64109 /* No need to invalidate - it was non-present before */
64110 update_mmu_cache(vma, address, page_table);
64111+
64112+#ifdef CONFIG_PAX_SEGMEXEC
64113+ pax_mirror_anon_pte(vma, address, page, ptl);
64114+#endif
64115+
64116 unlock:
64117 pte_unmap_unlock(page_table, ptl);
64118 out:
64119@@ -3039,40 +3252,6 @@ out_release:
64120 }
64121
64122 /*
64123- * This is like a special single-page "expand_{down|up}wards()",
64124- * except we must first make sure that 'address{-|+}PAGE_SIZE'
64125- * doesn't hit another vma.
64126- */
64127-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
64128-{
64129- address &= PAGE_MASK;
64130- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
64131- struct vm_area_struct *prev = vma->vm_prev;
64132-
64133- /*
64134- * Is there a mapping abutting this one below?
64135- *
64136- * That's only ok if it's the same stack mapping
64137- * that has gotten split..
64138- */
64139- if (prev && prev->vm_end == address)
64140- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
64141-
64142- expand_downwards(vma, address - PAGE_SIZE);
64143- }
64144- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
64145- struct vm_area_struct *next = vma->vm_next;
64146-
64147- /* As VM_GROWSDOWN but s/below/above/ */
64148- if (next && next->vm_start == address + PAGE_SIZE)
64149- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
64150-
64151- expand_upwards(vma, address + PAGE_SIZE);
64152- }
64153- return 0;
64154-}
64155-
64156-/*
64157 * We enter with non-exclusive mmap_sem (to exclude vma changes,
64158 * but allow concurrent faults), and pte mapped but not yet locked.
64159 * We return with mmap_sem still held, but pte unmapped and unlocked.
64160@@ -3081,27 +3260,23 @@ static int do_anonymous_page(struct mm_s
64161 unsigned long address, pte_t *page_table, pmd_t *pmd,
64162 unsigned int flags)
64163 {
64164- struct page *page;
64165+ struct page *page = NULL;
64166 spinlock_t *ptl;
64167 pte_t entry;
64168
64169- pte_unmap(page_table);
64170-
64171- /* Check if we need to add a guard page to the stack */
64172- if (check_stack_guard_page(vma, address) < 0)
64173- return VM_FAULT_SIGBUS;
64174-
64175- /* Use the zero-page for reads */
64176 if (!(flags & FAULT_FLAG_WRITE)) {
64177 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
64178 vma->vm_page_prot));
64179- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
64180+ ptl = pte_lockptr(mm, pmd);
64181+ spin_lock(ptl);
64182 if (!pte_none(*page_table))
64183 goto unlock;
64184 goto setpte;
64185 }
64186
64187 /* Allocate our own private page. */
64188+ pte_unmap(page_table);
64189+
64190 if (unlikely(anon_vma_prepare(vma)))
64191 goto oom;
64192 page = alloc_zeroed_user_highpage_movable(vma, address);
64193@@ -3120,6 +3295,11 @@ static int do_anonymous_page(struct mm_s
64194 if (!pte_none(*page_table))
64195 goto release;
64196
64197+#ifdef CONFIG_PAX_SEGMEXEC
64198+ if (pax_find_mirror_vma(vma))
64199+ BUG_ON(!trylock_page(page));
64200+#endif
64201+
64202 inc_mm_counter_fast(mm, MM_ANONPAGES);
64203 page_add_new_anon_rmap(page, vma, address);
64204 setpte:
64205@@ -3127,6 +3307,12 @@ setpte:
64206
64207 /* No need to invalidate - it was non-present before */
64208 update_mmu_cache(vma, address, page_table);
64209+
64210+#ifdef CONFIG_PAX_SEGMEXEC
64211+ if (page)
64212+ pax_mirror_anon_pte(vma, address, page, ptl);
64213+#endif
64214+
64215 unlock:
64216 pte_unmap_unlock(page_table, ptl);
64217 return 0;
64218@@ -3264,6 +3450,12 @@ static int __do_fault(struct mm_struct *
64219 */
64220 /* Only go through if we didn't race with anybody else... */
64221 if (likely(pte_same(*page_table, orig_pte))) {
64222+
64223+#ifdef CONFIG_PAX_SEGMEXEC
64224+ if (anon && pax_find_mirror_vma(vma))
64225+ BUG_ON(!trylock_page(page));
64226+#endif
64227+
64228 flush_icache_page(vma, page);
64229 entry = mk_pte(page, vma->vm_page_prot);
64230 if (flags & FAULT_FLAG_WRITE)
64231@@ -3283,6 +3475,14 @@ static int __do_fault(struct mm_struct *
64232
64233 /* no need to invalidate: a not-present page won't be cached */
64234 update_mmu_cache(vma, address, page_table);
64235+
64236+#ifdef CONFIG_PAX_SEGMEXEC
64237+ if (anon)
64238+ pax_mirror_anon_pte(vma, address, page, ptl);
64239+ else
64240+ pax_mirror_file_pte(vma, address, page, ptl);
64241+#endif
64242+
64243 } else {
64244 if (charged)
64245 mem_cgroup_uncharge_page(page);
64246@@ -3430,6 +3630,12 @@ int handle_pte_fault(struct mm_struct *m
64247 if (flags & FAULT_FLAG_WRITE)
64248 flush_tlb_fix_spurious_fault(vma, address);
64249 }
64250+
64251+#ifdef CONFIG_PAX_SEGMEXEC
64252+ pax_mirror_pte(vma, address, pte, pmd, ptl);
64253+ return 0;
64254+#endif
64255+
64256 unlock:
64257 pte_unmap_unlock(pte, ptl);
64258 return 0;
64259@@ -3446,6 +3652,10 @@ int handle_mm_fault(struct mm_struct *mm
64260 pmd_t *pmd;
64261 pte_t *pte;
64262
64263+#ifdef CONFIG_PAX_SEGMEXEC
64264+ struct vm_area_struct *vma_m;
64265+#endif
64266+
64267 __set_current_state(TASK_RUNNING);
64268
64269 count_vm_event(PGFAULT);
64270@@ -3457,6 +3667,34 @@ int handle_mm_fault(struct mm_struct *mm
64271 if (unlikely(is_vm_hugetlb_page(vma)))
64272 return hugetlb_fault(mm, vma, address, flags);
64273
64274+#ifdef CONFIG_PAX_SEGMEXEC
64275+ vma_m = pax_find_mirror_vma(vma);
64276+ if (vma_m) {
64277+ unsigned long address_m;
64278+ pgd_t *pgd_m;
64279+ pud_t *pud_m;
64280+ pmd_t *pmd_m;
64281+
64282+ if (vma->vm_start > vma_m->vm_start) {
64283+ address_m = address;
64284+ address -= SEGMEXEC_TASK_SIZE;
64285+ vma = vma_m;
64286+ } else
64287+ address_m = address + SEGMEXEC_TASK_SIZE;
64288+
64289+ pgd_m = pgd_offset(mm, address_m);
64290+ pud_m = pud_alloc(mm, pgd_m, address_m);
64291+ if (!pud_m)
64292+ return VM_FAULT_OOM;
64293+ pmd_m = pmd_alloc(mm, pud_m, address_m);
64294+ if (!pmd_m)
64295+ return VM_FAULT_OOM;
64296+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
64297+ return VM_FAULT_OOM;
64298+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
64299+ }
64300+#endif
64301+
64302 pgd = pgd_offset(mm, address);
64303 pud = pud_alloc(mm, pgd, address);
64304 if (!pud)
64305@@ -3486,7 +3724,7 @@ int handle_mm_fault(struct mm_struct *mm
64306 * run pte_offset_map on the pmd, if an huge pmd could
64307 * materialize from under us from a different thread.
64308 */
64309- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
64310+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
64311 return VM_FAULT_OOM;
64312 /* if an huge pmd materialized from under us just retry later */
64313 if (unlikely(pmd_trans_huge(*pmd)))
64314@@ -3590,7 +3828,7 @@ static int __init gate_vma_init(void)
64315 gate_vma.vm_start = FIXADDR_USER_START;
64316 gate_vma.vm_end = FIXADDR_USER_END;
64317 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
64318- gate_vma.vm_page_prot = __P101;
64319+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
64320 /*
64321 * Make sure the vDSO gets into every core dump.
64322 * Dumping its contents makes post-mortem fully interpretable later
64323diff -urNp linux-3.0.4/mm/memory-failure.c linux-3.0.4/mm/memory-failure.c
64324--- linux-3.0.4/mm/memory-failure.c 2011-07-21 22:17:23.000000000 -0400
64325+++ linux-3.0.4/mm/memory-failure.c 2011-08-23 21:47:56.000000000 -0400
64326@@ -59,7 +59,7 @@ int sysctl_memory_failure_early_kill __r
64327
64328 int sysctl_memory_failure_recovery __read_mostly = 1;
64329
64330-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
64331+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
64332
64333 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
64334
64335@@ -1008,7 +1008,7 @@ int __memory_failure(unsigned long pfn,
64336 }
64337
64338 nr_pages = 1 << compound_trans_order(hpage);
64339- atomic_long_add(nr_pages, &mce_bad_pages);
64340+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
64341
64342 /*
64343 * We need/can do nothing about count=0 pages.
64344@@ -1038,7 +1038,7 @@ int __memory_failure(unsigned long pfn,
64345 if (!PageHWPoison(hpage)
64346 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
64347 || (p != hpage && TestSetPageHWPoison(hpage))) {
64348- atomic_long_sub(nr_pages, &mce_bad_pages);
64349+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
64350 return 0;
64351 }
64352 set_page_hwpoison_huge_page(hpage);
64353@@ -1096,7 +1096,7 @@ int __memory_failure(unsigned long pfn,
64354 }
64355 if (hwpoison_filter(p)) {
64356 if (TestClearPageHWPoison(p))
64357- atomic_long_sub(nr_pages, &mce_bad_pages);
64358+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
64359 unlock_page(hpage);
64360 put_page(hpage);
64361 return 0;
64362@@ -1222,7 +1222,7 @@ int unpoison_memory(unsigned long pfn)
64363 return 0;
64364 }
64365 if (TestClearPageHWPoison(p))
64366- atomic_long_sub(nr_pages, &mce_bad_pages);
64367+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
64368 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
64369 return 0;
64370 }
64371@@ -1236,7 +1236,7 @@ int unpoison_memory(unsigned long pfn)
64372 */
64373 if (TestClearPageHWPoison(page)) {
64374 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
64375- atomic_long_sub(nr_pages, &mce_bad_pages);
64376+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
64377 freeit = 1;
64378 if (PageHuge(page))
64379 clear_page_hwpoison_huge_page(page);
64380@@ -1349,7 +1349,7 @@ static int soft_offline_huge_page(struct
64381 }
64382 done:
64383 if (!PageHWPoison(hpage))
64384- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
64385+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
64386 set_page_hwpoison_huge_page(hpage);
64387 dequeue_hwpoisoned_huge_page(hpage);
64388 /* keep elevated page count for bad page */
64389@@ -1480,7 +1480,7 @@ int soft_offline_page(struct page *page,
64390 return ret;
64391
64392 done:
64393- atomic_long_add(1, &mce_bad_pages);
64394+ atomic_long_add_unchecked(1, &mce_bad_pages);
64395 SetPageHWPoison(page);
64396 /* keep elevated page count for bad page */
64397 return ret;
64398diff -urNp linux-3.0.4/mm/mempolicy.c linux-3.0.4/mm/mempolicy.c
64399--- linux-3.0.4/mm/mempolicy.c 2011-07-21 22:17:23.000000000 -0400
64400+++ linux-3.0.4/mm/mempolicy.c 2011-08-23 21:48:14.000000000 -0400
64401@@ -639,6 +639,10 @@ static int mbind_range(struct mm_struct
64402 unsigned long vmstart;
64403 unsigned long vmend;
64404
64405+#ifdef CONFIG_PAX_SEGMEXEC
64406+ struct vm_area_struct *vma_m;
64407+#endif
64408+
64409 vma = find_vma_prev(mm, start, &prev);
64410 if (!vma || vma->vm_start > start)
64411 return -EFAULT;
64412@@ -669,6 +673,16 @@ static int mbind_range(struct mm_struct
64413 err = policy_vma(vma, new_pol);
64414 if (err)
64415 goto out;
64416+
64417+#ifdef CONFIG_PAX_SEGMEXEC
64418+ vma_m = pax_find_mirror_vma(vma);
64419+ if (vma_m) {
64420+ err = policy_vma(vma_m, new_pol);
64421+ if (err)
64422+ goto out;
64423+ }
64424+#endif
64425+
64426 }
64427
64428 out:
64429@@ -1102,6 +1116,17 @@ static long do_mbind(unsigned long start
64430
64431 if (end < start)
64432 return -EINVAL;
64433+
64434+#ifdef CONFIG_PAX_SEGMEXEC
64435+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
64436+ if (end > SEGMEXEC_TASK_SIZE)
64437+ return -EINVAL;
64438+ } else
64439+#endif
64440+
64441+ if (end > TASK_SIZE)
64442+ return -EINVAL;
64443+
64444 if (end == start)
64445 return 0;
64446
64447@@ -1320,6 +1345,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
64448 if (!mm)
64449 goto out;
64450
64451+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64452+ if (mm != current->mm &&
64453+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
64454+ err = -EPERM;
64455+ goto out;
64456+ }
64457+#endif
64458+
64459 /*
64460 * Check if this process has the right to modify the specified
64461 * process. The right exists if the process has administrative
64462@@ -1329,8 +1362,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
64463 rcu_read_lock();
64464 tcred = __task_cred(task);
64465 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
64466- cred->uid != tcred->suid && cred->uid != tcred->uid &&
64467- !capable(CAP_SYS_NICE)) {
64468+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
64469 rcu_read_unlock();
64470 err = -EPERM;
64471 goto out;
64472diff -urNp linux-3.0.4/mm/migrate.c linux-3.0.4/mm/migrate.c
64473--- linux-3.0.4/mm/migrate.c 2011-07-21 22:17:23.000000000 -0400
64474+++ linux-3.0.4/mm/migrate.c 2011-08-23 21:48:14.000000000 -0400
64475@@ -1124,6 +1124,8 @@ static int do_pages_move(struct mm_struc
64476 unsigned long chunk_start;
64477 int err;
64478
64479+ pax_track_stack();
64480+
64481 task_nodes = cpuset_mems_allowed(task);
64482
64483 err = -ENOMEM;
64484@@ -1308,6 +1310,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
64485 if (!mm)
64486 return -EINVAL;
64487
64488+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64489+ if (mm != current->mm &&
64490+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
64491+ err = -EPERM;
64492+ goto out;
64493+ }
64494+#endif
64495+
64496 /*
64497 * Check if this process has the right to modify the specified
64498 * process. The right exists if the process has administrative
64499@@ -1317,8 +1327,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
64500 rcu_read_lock();
64501 tcred = __task_cred(task);
64502 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
64503- cred->uid != tcred->suid && cred->uid != tcred->uid &&
64504- !capable(CAP_SYS_NICE)) {
64505+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
64506 rcu_read_unlock();
64507 err = -EPERM;
64508 goto out;
64509diff -urNp linux-3.0.4/mm/mlock.c linux-3.0.4/mm/mlock.c
64510--- linux-3.0.4/mm/mlock.c 2011-07-21 22:17:23.000000000 -0400
64511+++ linux-3.0.4/mm/mlock.c 2011-08-23 21:48:14.000000000 -0400
64512@@ -13,6 +13,7 @@
64513 #include <linux/pagemap.h>
64514 #include <linux/mempolicy.h>
64515 #include <linux/syscalls.h>
64516+#include <linux/security.h>
64517 #include <linux/sched.h>
64518 #include <linux/module.h>
64519 #include <linux/rmap.h>
64520@@ -377,6 +378,9 @@ static int do_mlock(unsigned long start,
64521 return -EINVAL;
64522 if (end == start)
64523 return 0;
64524+ if (end > TASK_SIZE)
64525+ return -EINVAL;
64526+
64527 vma = find_vma_prev(current->mm, start, &prev);
64528 if (!vma || vma->vm_start > start)
64529 return -ENOMEM;
64530@@ -387,6 +391,11 @@ static int do_mlock(unsigned long start,
64531 for (nstart = start ; ; ) {
64532 vm_flags_t newflags;
64533
64534+#ifdef CONFIG_PAX_SEGMEXEC
64535+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
64536+ break;
64537+#endif
64538+
64539 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
64540
64541 newflags = vma->vm_flags | VM_LOCKED;
64542@@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
64543 lock_limit >>= PAGE_SHIFT;
64544
64545 /* check against resource limits */
64546+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
64547 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
64548 error = do_mlock(start, len, 1);
64549 up_write(&current->mm->mmap_sem);
64550@@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
64551 static int do_mlockall(int flags)
64552 {
64553 struct vm_area_struct * vma, * prev = NULL;
64554- unsigned int def_flags = 0;
64555
64556 if (flags & MCL_FUTURE)
64557- def_flags = VM_LOCKED;
64558- current->mm->def_flags = def_flags;
64559+ current->mm->def_flags |= VM_LOCKED;
64560+ else
64561+ current->mm->def_flags &= ~VM_LOCKED;
64562 if (flags == MCL_FUTURE)
64563 goto out;
64564
64565 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
64566 vm_flags_t newflags;
64567
64568+#ifdef CONFIG_PAX_SEGMEXEC
64569+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
64570+ break;
64571+#endif
64572+
64573+ BUG_ON(vma->vm_end > TASK_SIZE);
64574 newflags = vma->vm_flags | VM_LOCKED;
64575 if (!(flags & MCL_CURRENT))
64576 newflags &= ~VM_LOCKED;
64577@@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
64578 lock_limit >>= PAGE_SHIFT;
64579
64580 ret = -ENOMEM;
64581+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
64582 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
64583 capable(CAP_IPC_LOCK))
64584 ret = do_mlockall(flags);
64585diff -urNp linux-3.0.4/mm/mmap.c linux-3.0.4/mm/mmap.c
64586--- linux-3.0.4/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
64587+++ linux-3.0.4/mm/mmap.c 2011-08-23 21:48:14.000000000 -0400
64588@@ -46,6 +46,16 @@
64589 #define arch_rebalance_pgtables(addr, len) (addr)
64590 #endif
64591
64592+static inline void verify_mm_writelocked(struct mm_struct *mm)
64593+{
64594+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
64595+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
64596+ up_read(&mm->mmap_sem);
64597+ BUG();
64598+ }
64599+#endif
64600+}
64601+
64602 static void unmap_region(struct mm_struct *mm,
64603 struct vm_area_struct *vma, struct vm_area_struct *prev,
64604 unsigned long start, unsigned long end);
64605@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struc
64606 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
64607 *
64608 */
64609-pgprot_t protection_map[16] = {
64610+pgprot_t protection_map[16] __read_only = {
64611 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
64612 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
64613 };
64614
64615-pgprot_t vm_get_page_prot(unsigned long vm_flags)
64616+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
64617 {
64618- return __pgprot(pgprot_val(protection_map[vm_flags &
64619+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
64620 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
64621 pgprot_val(arch_vm_get_page_prot(vm_flags)));
64622+
64623+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64624+ if (!(__supported_pte_mask & _PAGE_NX) &&
64625+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
64626+ (vm_flags & (VM_READ | VM_WRITE)))
64627+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
64628+#endif
64629+
64630+ return prot;
64631 }
64632 EXPORT_SYMBOL(vm_get_page_prot);
64633
64634 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
64635 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
64636 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
64637+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
64638 /*
64639 * Make sure vm_committed_as in one cacheline and not cacheline shared with
64640 * other variables. It can be updated by several CPUs frequently.
64641@@ -236,6 +256,7 @@ static struct vm_area_struct *remove_vma
64642 struct vm_area_struct *next = vma->vm_next;
64643
64644 might_sleep();
64645+ BUG_ON(vma->vm_mirror);
64646 if (vma->vm_ops && vma->vm_ops->close)
64647 vma->vm_ops->close(vma);
64648 if (vma->vm_file) {
64649@@ -280,6 +301,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
64650 * not page aligned -Ram Gupta
64651 */
64652 rlim = rlimit(RLIMIT_DATA);
64653+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
64654 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
64655 (mm->end_data - mm->start_data) > rlim)
64656 goto out;
64657@@ -697,6 +719,12 @@ static int
64658 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
64659 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
64660 {
64661+
64662+#ifdef CONFIG_PAX_SEGMEXEC
64663+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
64664+ return 0;
64665+#endif
64666+
64667 if (is_mergeable_vma(vma, file, vm_flags) &&
64668 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
64669 if (vma->vm_pgoff == vm_pgoff)
64670@@ -716,6 +744,12 @@ static int
64671 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
64672 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
64673 {
64674+
64675+#ifdef CONFIG_PAX_SEGMEXEC
64676+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
64677+ return 0;
64678+#endif
64679+
64680 if (is_mergeable_vma(vma, file, vm_flags) &&
64681 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
64682 pgoff_t vm_pglen;
64683@@ -758,13 +792,20 @@ can_vma_merge_after(struct vm_area_struc
64684 struct vm_area_struct *vma_merge(struct mm_struct *mm,
64685 struct vm_area_struct *prev, unsigned long addr,
64686 unsigned long end, unsigned long vm_flags,
64687- struct anon_vma *anon_vma, struct file *file,
64688+ struct anon_vma *anon_vma, struct file *file,
64689 pgoff_t pgoff, struct mempolicy *policy)
64690 {
64691 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
64692 struct vm_area_struct *area, *next;
64693 int err;
64694
64695+#ifdef CONFIG_PAX_SEGMEXEC
64696+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
64697+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
64698+
64699+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
64700+#endif
64701+
64702 /*
64703 * We later require that vma->vm_flags == vm_flags,
64704 * so this tests vma->vm_flags & VM_SPECIAL, too.
64705@@ -780,6 +821,15 @@ struct vm_area_struct *vma_merge(struct
64706 if (next && next->vm_end == end) /* cases 6, 7, 8 */
64707 next = next->vm_next;
64708
64709+#ifdef CONFIG_PAX_SEGMEXEC
64710+ if (prev)
64711+ prev_m = pax_find_mirror_vma(prev);
64712+ if (area)
64713+ area_m = pax_find_mirror_vma(area);
64714+ if (next)
64715+ next_m = pax_find_mirror_vma(next);
64716+#endif
64717+
64718 /*
64719 * Can it merge with the predecessor?
64720 */
64721@@ -799,9 +849,24 @@ struct vm_area_struct *vma_merge(struct
64722 /* cases 1, 6 */
64723 err = vma_adjust(prev, prev->vm_start,
64724 next->vm_end, prev->vm_pgoff, NULL);
64725- } else /* cases 2, 5, 7 */
64726+
64727+#ifdef CONFIG_PAX_SEGMEXEC
64728+ if (!err && prev_m)
64729+ err = vma_adjust(prev_m, prev_m->vm_start,
64730+ next_m->vm_end, prev_m->vm_pgoff, NULL);
64731+#endif
64732+
64733+ } else { /* cases 2, 5, 7 */
64734 err = vma_adjust(prev, prev->vm_start,
64735 end, prev->vm_pgoff, NULL);
64736+
64737+#ifdef CONFIG_PAX_SEGMEXEC
64738+ if (!err && prev_m)
64739+ err = vma_adjust(prev_m, prev_m->vm_start,
64740+ end_m, prev_m->vm_pgoff, NULL);
64741+#endif
64742+
64743+ }
64744 if (err)
64745 return NULL;
64746 khugepaged_enter_vma_merge(prev);
64747@@ -815,12 +880,27 @@ struct vm_area_struct *vma_merge(struct
64748 mpol_equal(policy, vma_policy(next)) &&
64749 can_vma_merge_before(next, vm_flags,
64750 anon_vma, file, pgoff+pglen)) {
64751- if (prev && addr < prev->vm_end) /* case 4 */
64752+ if (prev && addr < prev->vm_end) { /* case 4 */
64753 err = vma_adjust(prev, prev->vm_start,
64754 addr, prev->vm_pgoff, NULL);
64755- else /* cases 3, 8 */
64756+
64757+#ifdef CONFIG_PAX_SEGMEXEC
64758+ if (!err && prev_m)
64759+ err = vma_adjust(prev_m, prev_m->vm_start,
64760+ addr_m, prev_m->vm_pgoff, NULL);
64761+#endif
64762+
64763+ } else { /* cases 3, 8 */
64764 err = vma_adjust(area, addr, next->vm_end,
64765 next->vm_pgoff - pglen, NULL);
64766+
64767+#ifdef CONFIG_PAX_SEGMEXEC
64768+ if (!err && area_m)
64769+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
64770+ next_m->vm_pgoff - pglen, NULL);
64771+#endif
64772+
64773+ }
64774 if (err)
64775 return NULL;
64776 khugepaged_enter_vma_merge(area);
64777@@ -929,14 +1009,11 @@ none:
64778 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
64779 struct file *file, long pages)
64780 {
64781- const unsigned long stack_flags
64782- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
64783-
64784 if (file) {
64785 mm->shared_vm += pages;
64786 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
64787 mm->exec_vm += pages;
64788- } else if (flags & stack_flags)
64789+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
64790 mm->stack_vm += pages;
64791 if (flags & (VM_RESERVED|VM_IO))
64792 mm->reserved_vm += pages;
64793@@ -963,7 +1040,7 @@ unsigned long do_mmap_pgoff(struct file
64794 * (the exception is when the underlying filesystem is noexec
64795 * mounted, in which case we dont add PROT_EXEC.)
64796 */
64797- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
64798+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
64799 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
64800 prot |= PROT_EXEC;
64801
64802@@ -989,7 +1066,7 @@ unsigned long do_mmap_pgoff(struct file
64803 /* Obtain the address to map to. we verify (or select) it and ensure
64804 * that it represents a valid section of the address space.
64805 */
64806- addr = get_unmapped_area(file, addr, len, pgoff, flags);
64807+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
64808 if (addr & ~PAGE_MASK)
64809 return addr;
64810
64811@@ -1000,6 +1077,36 @@ unsigned long do_mmap_pgoff(struct file
64812 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
64813 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
64814
64815+#ifdef CONFIG_PAX_MPROTECT
64816+ if (mm->pax_flags & MF_PAX_MPROTECT) {
64817+#ifndef CONFIG_PAX_MPROTECT_COMPAT
64818+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
64819+ gr_log_rwxmmap(file);
64820+
64821+#ifdef CONFIG_PAX_EMUPLT
64822+ vm_flags &= ~VM_EXEC;
64823+#else
64824+ return -EPERM;
64825+#endif
64826+
64827+ }
64828+
64829+ if (!(vm_flags & VM_EXEC))
64830+ vm_flags &= ~VM_MAYEXEC;
64831+#else
64832+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
64833+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
64834+#endif
64835+ else
64836+ vm_flags &= ~VM_MAYWRITE;
64837+ }
64838+#endif
64839+
64840+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64841+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
64842+ vm_flags &= ~VM_PAGEEXEC;
64843+#endif
64844+
64845 if (flags & MAP_LOCKED)
64846 if (!can_do_mlock())
64847 return -EPERM;
64848@@ -1011,6 +1118,7 @@ unsigned long do_mmap_pgoff(struct file
64849 locked += mm->locked_vm;
64850 lock_limit = rlimit(RLIMIT_MEMLOCK);
64851 lock_limit >>= PAGE_SHIFT;
64852+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
64853 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
64854 return -EAGAIN;
64855 }
64856@@ -1081,6 +1189,9 @@ unsigned long do_mmap_pgoff(struct file
64857 if (error)
64858 return error;
64859
64860+ if (!gr_acl_handle_mmap(file, prot))
64861+ return -EACCES;
64862+
64863 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
64864 }
64865 EXPORT_SYMBOL(do_mmap_pgoff);
64866@@ -1161,7 +1272,7 @@ int vma_wants_writenotify(struct vm_area
64867 vm_flags_t vm_flags = vma->vm_flags;
64868
64869 /* If it was private or non-writable, the write bit is already clear */
64870- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
64871+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
64872 return 0;
64873
64874 /* The backer wishes to know when pages are first written to? */
64875@@ -1210,14 +1321,24 @@ unsigned long mmap_region(struct file *f
64876 unsigned long charged = 0;
64877 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
64878
64879+#ifdef CONFIG_PAX_SEGMEXEC
64880+ struct vm_area_struct *vma_m = NULL;
64881+#endif
64882+
64883+ /*
64884+ * mm->mmap_sem is required to protect against another thread
64885+ * changing the mappings in case we sleep.
64886+ */
64887+ verify_mm_writelocked(mm);
64888+
64889 /* Clear old maps */
64890 error = -ENOMEM;
64891-munmap_back:
64892 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
64893 if (vma && vma->vm_start < addr + len) {
64894 if (do_munmap(mm, addr, len))
64895 return -ENOMEM;
64896- goto munmap_back;
64897+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
64898+ BUG_ON(vma && vma->vm_start < addr + len);
64899 }
64900
64901 /* Check against address space limit. */
64902@@ -1266,6 +1387,16 @@ munmap_back:
64903 goto unacct_error;
64904 }
64905
64906+#ifdef CONFIG_PAX_SEGMEXEC
64907+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
64908+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
64909+ if (!vma_m) {
64910+ error = -ENOMEM;
64911+ goto free_vma;
64912+ }
64913+ }
64914+#endif
64915+
64916 vma->vm_mm = mm;
64917 vma->vm_start = addr;
64918 vma->vm_end = addr + len;
64919@@ -1289,6 +1420,19 @@ munmap_back:
64920 error = file->f_op->mmap(file, vma);
64921 if (error)
64922 goto unmap_and_free_vma;
64923+
64924+#ifdef CONFIG_PAX_SEGMEXEC
64925+ if (vma_m && (vm_flags & VM_EXECUTABLE))
64926+ added_exe_file_vma(mm);
64927+#endif
64928+
64929+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64930+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
64931+ vma->vm_flags |= VM_PAGEEXEC;
64932+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
64933+ }
64934+#endif
64935+
64936 if (vm_flags & VM_EXECUTABLE)
64937 added_exe_file_vma(mm);
64938
64939@@ -1324,6 +1468,11 @@ munmap_back:
64940 vma_link(mm, vma, prev, rb_link, rb_parent);
64941 file = vma->vm_file;
64942
64943+#ifdef CONFIG_PAX_SEGMEXEC
64944+ if (vma_m)
64945+ BUG_ON(pax_mirror_vma(vma_m, vma));
64946+#endif
64947+
64948 /* Once vma denies write, undo our temporary denial count */
64949 if (correct_wcount)
64950 atomic_inc(&inode->i_writecount);
64951@@ -1332,6 +1481,7 @@ out:
64952
64953 mm->total_vm += len >> PAGE_SHIFT;
64954 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
64955+ track_exec_limit(mm, addr, addr + len, vm_flags);
64956 if (vm_flags & VM_LOCKED) {
64957 if (!mlock_vma_pages_range(vma, addr, addr + len))
64958 mm->locked_vm += (len >> PAGE_SHIFT);
64959@@ -1349,6 +1499,12 @@ unmap_and_free_vma:
64960 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
64961 charged = 0;
64962 free_vma:
64963+
64964+#ifdef CONFIG_PAX_SEGMEXEC
64965+ if (vma_m)
64966+ kmem_cache_free(vm_area_cachep, vma_m);
64967+#endif
64968+
64969 kmem_cache_free(vm_area_cachep, vma);
64970 unacct_error:
64971 if (charged)
64972@@ -1356,6 +1512,44 @@ unacct_error:
64973 return error;
64974 }
64975
64976+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
64977+{
64978+ if (!vma) {
64979+#ifdef CONFIG_STACK_GROWSUP
64980+ if (addr > sysctl_heap_stack_gap)
64981+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
64982+ else
64983+ vma = find_vma(current->mm, 0);
64984+ if (vma && (vma->vm_flags & VM_GROWSUP))
64985+ return false;
64986+#endif
64987+ return true;
64988+ }
64989+
64990+ if (addr + len > vma->vm_start)
64991+ return false;
64992+
64993+ if (vma->vm_flags & VM_GROWSDOWN)
64994+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
64995+#ifdef CONFIG_STACK_GROWSUP
64996+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
64997+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
64998+#endif
64999+
65000+ return true;
65001+}
65002+
65003+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
65004+{
65005+ if (vma->vm_start < len)
65006+ return -ENOMEM;
65007+ if (!(vma->vm_flags & VM_GROWSDOWN))
65008+ return vma->vm_start - len;
65009+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
65010+ return vma->vm_start - len - sysctl_heap_stack_gap;
65011+ return -ENOMEM;
65012+}
65013+
65014 /* Get an address range which is currently unmapped.
65015 * For shmat() with addr=0.
65016 *
65017@@ -1382,18 +1576,23 @@ arch_get_unmapped_area(struct file *filp
65018 if (flags & MAP_FIXED)
65019 return addr;
65020
65021+#ifdef CONFIG_PAX_RANDMMAP
65022+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
65023+#endif
65024+
65025 if (addr) {
65026 addr = PAGE_ALIGN(addr);
65027- vma = find_vma(mm, addr);
65028- if (TASK_SIZE - len >= addr &&
65029- (!vma || addr + len <= vma->vm_start))
65030- return addr;
65031+ if (TASK_SIZE - len >= addr) {
65032+ vma = find_vma(mm, addr);
65033+ if (check_heap_stack_gap(vma, addr, len))
65034+ return addr;
65035+ }
65036 }
65037 if (len > mm->cached_hole_size) {
65038- start_addr = addr = mm->free_area_cache;
65039+ start_addr = addr = mm->free_area_cache;
65040 } else {
65041- start_addr = addr = TASK_UNMAPPED_BASE;
65042- mm->cached_hole_size = 0;
65043+ start_addr = addr = mm->mmap_base;
65044+ mm->cached_hole_size = 0;
65045 }
65046
65047 full_search:
65048@@ -1404,34 +1603,40 @@ full_search:
65049 * Start a new search - just in case we missed
65050 * some holes.
65051 */
65052- if (start_addr != TASK_UNMAPPED_BASE) {
65053- addr = TASK_UNMAPPED_BASE;
65054- start_addr = addr;
65055+ if (start_addr != mm->mmap_base) {
65056+ start_addr = addr = mm->mmap_base;
65057 mm->cached_hole_size = 0;
65058 goto full_search;
65059 }
65060 return -ENOMEM;
65061 }
65062- if (!vma || addr + len <= vma->vm_start) {
65063- /*
65064- * Remember the place where we stopped the search:
65065- */
65066- mm->free_area_cache = addr + len;
65067- return addr;
65068- }
65069+ if (check_heap_stack_gap(vma, addr, len))
65070+ break;
65071 if (addr + mm->cached_hole_size < vma->vm_start)
65072 mm->cached_hole_size = vma->vm_start - addr;
65073 addr = vma->vm_end;
65074 }
65075+
65076+ /*
65077+ * Remember the place where we stopped the search:
65078+ */
65079+ mm->free_area_cache = addr + len;
65080+ return addr;
65081 }
65082 #endif
65083
65084 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
65085 {
65086+
65087+#ifdef CONFIG_PAX_SEGMEXEC
65088+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
65089+ return;
65090+#endif
65091+
65092 /*
65093 * Is this a new hole at the lowest possible address?
65094 */
65095- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
65096+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
65097 mm->free_area_cache = addr;
65098 mm->cached_hole_size = ~0UL;
65099 }
65100@@ -1449,7 +1654,7 @@ arch_get_unmapped_area_topdown(struct fi
65101 {
65102 struct vm_area_struct *vma;
65103 struct mm_struct *mm = current->mm;
65104- unsigned long addr = addr0;
65105+ unsigned long base = mm->mmap_base, addr = addr0;
65106
65107 /* requested length too big for entire address space */
65108 if (len > TASK_SIZE)
65109@@ -1458,13 +1663,18 @@ arch_get_unmapped_area_topdown(struct fi
65110 if (flags & MAP_FIXED)
65111 return addr;
65112
65113+#ifdef CONFIG_PAX_RANDMMAP
65114+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
65115+#endif
65116+
65117 /* requesting a specific address */
65118 if (addr) {
65119 addr = PAGE_ALIGN(addr);
65120- vma = find_vma(mm, addr);
65121- if (TASK_SIZE - len >= addr &&
65122- (!vma || addr + len <= vma->vm_start))
65123- return addr;
65124+ if (TASK_SIZE - len >= addr) {
65125+ vma = find_vma(mm, addr);
65126+ if (check_heap_stack_gap(vma, addr, len))
65127+ return addr;
65128+ }
65129 }
65130
65131 /* check if free_area_cache is useful for us */
65132@@ -1479,7 +1689,7 @@ arch_get_unmapped_area_topdown(struct fi
65133 /* make sure it can fit in the remaining address space */
65134 if (addr > len) {
65135 vma = find_vma(mm, addr-len);
65136- if (!vma || addr <= vma->vm_start)
65137+ if (check_heap_stack_gap(vma, addr - len, len))
65138 /* remember the address as a hint for next time */
65139 return (mm->free_area_cache = addr-len);
65140 }
65141@@ -1496,7 +1706,7 @@ arch_get_unmapped_area_topdown(struct fi
65142 * return with success:
65143 */
65144 vma = find_vma(mm, addr);
65145- if (!vma || addr+len <= vma->vm_start)
65146+ if (check_heap_stack_gap(vma, addr, len))
65147 /* remember the address as a hint for next time */
65148 return (mm->free_area_cache = addr);
65149
65150@@ -1505,8 +1715,8 @@ arch_get_unmapped_area_topdown(struct fi
65151 mm->cached_hole_size = vma->vm_start - addr;
65152
65153 /* try just below the current vma->vm_start */
65154- addr = vma->vm_start-len;
65155- } while (len < vma->vm_start);
65156+ addr = skip_heap_stack_gap(vma, len);
65157+ } while (!IS_ERR_VALUE(addr));
65158
65159 bottomup:
65160 /*
65161@@ -1515,13 +1725,21 @@ bottomup:
65162 * can happen with large stack limits and large mmap()
65163 * allocations.
65164 */
65165+ mm->mmap_base = TASK_UNMAPPED_BASE;
65166+
65167+#ifdef CONFIG_PAX_RANDMMAP
65168+ if (mm->pax_flags & MF_PAX_RANDMMAP)
65169+ mm->mmap_base += mm->delta_mmap;
65170+#endif
65171+
65172+ mm->free_area_cache = mm->mmap_base;
65173 mm->cached_hole_size = ~0UL;
65174- mm->free_area_cache = TASK_UNMAPPED_BASE;
65175 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
65176 /*
65177 * Restore the topdown base:
65178 */
65179- mm->free_area_cache = mm->mmap_base;
65180+ mm->mmap_base = base;
65181+ mm->free_area_cache = base;
65182 mm->cached_hole_size = ~0UL;
65183
65184 return addr;
65185@@ -1530,6 +1748,12 @@ bottomup:
65186
65187 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
65188 {
65189+
65190+#ifdef CONFIG_PAX_SEGMEXEC
65191+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
65192+ return;
65193+#endif
65194+
65195 /*
65196 * Is this a new hole at the highest possible address?
65197 */
65198@@ -1537,8 +1761,10 @@ void arch_unmap_area_topdown(struct mm_s
65199 mm->free_area_cache = addr;
65200
65201 /* dont allow allocations above current base */
65202- if (mm->free_area_cache > mm->mmap_base)
65203+ if (mm->free_area_cache > mm->mmap_base) {
65204 mm->free_area_cache = mm->mmap_base;
65205+ mm->cached_hole_size = ~0UL;
65206+ }
65207 }
65208
65209 unsigned long
65210@@ -1646,6 +1872,28 @@ out:
65211 return prev ? prev->vm_next : vma;
65212 }
65213
65214+#ifdef CONFIG_PAX_SEGMEXEC
65215+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
65216+{
65217+ struct vm_area_struct *vma_m;
65218+
65219+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
65220+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
65221+ BUG_ON(vma->vm_mirror);
65222+ return NULL;
65223+ }
65224+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
65225+ vma_m = vma->vm_mirror;
65226+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
65227+ BUG_ON(vma->vm_file != vma_m->vm_file);
65228+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
65229+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
65230+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
65231+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
65232+ return vma_m;
65233+}
65234+#endif
65235+
65236 /*
65237 * Verify that the stack growth is acceptable and
65238 * update accounting. This is shared with both the
65239@@ -1662,6 +1910,7 @@ static int acct_stack_growth(struct vm_a
65240 return -ENOMEM;
65241
65242 /* Stack limit test */
65243+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
65244 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
65245 return -ENOMEM;
65246
65247@@ -1672,6 +1921,7 @@ static int acct_stack_growth(struct vm_a
65248 locked = mm->locked_vm + grow;
65249 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
65250 limit >>= PAGE_SHIFT;
65251+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
65252 if (locked > limit && !capable(CAP_IPC_LOCK))
65253 return -ENOMEM;
65254 }
65255@@ -1702,37 +1952,48 @@ static int acct_stack_growth(struct vm_a
65256 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
65257 * vma is the last one with address > vma->vm_end. Have to extend vma.
65258 */
65259+#ifndef CONFIG_IA64
65260+static
65261+#endif
65262 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
65263 {
65264 int error;
65265+ bool locknext;
65266
65267 if (!(vma->vm_flags & VM_GROWSUP))
65268 return -EFAULT;
65269
65270+ /* Also guard against wrapping around to address 0. */
65271+ if (address < PAGE_ALIGN(address+1))
65272+ address = PAGE_ALIGN(address+1);
65273+ else
65274+ return -ENOMEM;
65275+
65276 /*
65277 * We must make sure the anon_vma is allocated
65278 * so that the anon_vma locking is not a noop.
65279 */
65280 if (unlikely(anon_vma_prepare(vma)))
65281 return -ENOMEM;
65282+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
65283+ if (locknext && anon_vma_prepare(vma->vm_next))
65284+ return -ENOMEM;
65285 vma_lock_anon_vma(vma);
65286+ if (locknext)
65287+ vma_lock_anon_vma(vma->vm_next);
65288
65289 /*
65290 * vma->vm_start/vm_end cannot change under us because the caller
65291 * is required to hold the mmap_sem in read mode. We need the
65292- * anon_vma lock to serialize against concurrent expand_stacks.
65293- * Also guard against wrapping around to address 0.
65294+ * anon_vma locks to serialize against concurrent expand_stacks
65295+ * and expand_upwards.
65296 */
65297- if (address < PAGE_ALIGN(address+4))
65298- address = PAGE_ALIGN(address+4);
65299- else {
65300- vma_unlock_anon_vma(vma);
65301- return -ENOMEM;
65302- }
65303 error = 0;
65304
65305 /* Somebody else might have raced and expanded it already */
65306- if (address > vma->vm_end) {
65307+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
65308+ error = -ENOMEM;
65309+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
65310 unsigned long size, grow;
65311
65312 size = address - vma->vm_start;
65313@@ -1747,6 +2008,8 @@ int expand_upwards(struct vm_area_struct
65314 }
65315 }
65316 }
65317+ if (locknext)
65318+ vma_unlock_anon_vma(vma->vm_next);
65319 vma_unlock_anon_vma(vma);
65320 khugepaged_enter_vma_merge(vma);
65321 return error;
65322@@ -1760,6 +2023,8 @@ int expand_downwards(struct vm_area_stru
65323 unsigned long address)
65324 {
65325 int error;
65326+ bool lockprev = false;
65327+ struct vm_area_struct *prev;
65328
65329 /*
65330 * We must make sure the anon_vma is allocated
65331@@ -1773,6 +2038,15 @@ int expand_downwards(struct vm_area_stru
65332 if (error)
65333 return error;
65334
65335+ prev = vma->vm_prev;
65336+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
65337+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
65338+#endif
65339+ if (lockprev && anon_vma_prepare(prev))
65340+ return -ENOMEM;
65341+ if (lockprev)
65342+ vma_lock_anon_vma(prev);
65343+
65344 vma_lock_anon_vma(vma);
65345
65346 /*
65347@@ -1782,9 +2056,17 @@ int expand_downwards(struct vm_area_stru
65348 */
65349
65350 /* Somebody else might have raced and expanded it already */
65351- if (address < vma->vm_start) {
65352+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
65353+ error = -ENOMEM;
65354+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
65355 unsigned long size, grow;
65356
65357+#ifdef CONFIG_PAX_SEGMEXEC
65358+ struct vm_area_struct *vma_m;
65359+
65360+ vma_m = pax_find_mirror_vma(vma);
65361+#endif
65362+
65363 size = vma->vm_end - address;
65364 grow = (vma->vm_start - address) >> PAGE_SHIFT;
65365
65366@@ -1794,11 +2076,22 @@ int expand_downwards(struct vm_area_stru
65367 if (!error) {
65368 vma->vm_start = address;
65369 vma->vm_pgoff -= grow;
65370+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
65371+
65372+#ifdef CONFIG_PAX_SEGMEXEC
65373+ if (vma_m) {
65374+ vma_m->vm_start -= grow << PAGE_SHIFT;
65375+ vma_m->vm_pgoff -= grow;
65376+ }
65377+#endif
65378+
65379 perf_event_mmap(vma);
65380 }
65381 }
65382 }
65383 vma_unlock_anon_vma(vma);
65384+ if (lockprev)
65385+ vma_unlock_anon_vma(prev);
65386 khugepaged_enter_vma_merge(vma);
65387 return error;
65388 }
65389@@ -1868,6 +2161,13 @@ static void remove_vma_list(struct mm_st
65390 do {
65391 long nrpages = vma_pages(vma);
65392
65393+#ifdef CONFIG_PAX_SEGMEXEC
65394+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
65395+ vma = remove_vma(vma);
65396+ continue;
65397+ }
65398+#endif
65399+
65400 mm->total_vm -= nrpages;
65401 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
65402 vma = remove_vma(vma);
65403@@ -1913,6 +2213,16 @@ detach_vmas_to_be_unmapped(struct mm_str
65404 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
65405 vma->vm_prev = NULL;
65406 do {
65407+
65408+#ifdef CONFIG_PAX_SEGMEXEC
65409+ if (vma->vm_mirror) {
65410+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
65411+ vma->vm_mirror->vm_mirror = NULL;
65412+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
65413+ vma->vm_mirror = NULL;
65414+ }
65415+#endif
65416+
65417 rb_erase(&vma->vm_rb, &mm->mm_rb);
65418 mm->map_count--;
65419 tail_vma = vma;
65420@@ -1941,14 +2251,33 @@ static int __split_vma(struct mm_struct
65421 struct vm_area_struct *new;
65422 int err = -ENOMEM;
65423
65424+#ifdef CONFIG_PAX_SEGMEXEC
65425+ struct vm_area_struct *vma_m, *new_m = NULL;
65426+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
65427+#endif
65428+
65429 if (is_vm_hugetlb_page(vma) && (addr &
65430 ~(huge_page_mask(hstate_vma(vma)))))
65431 return -EINVAL;
65432
65433+#ifdef CONFIG_PAX_SEGMEXEC
65434+ vma_m = pax_find_mirror_vma(vma);
65435+#endif
65436+
65437 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65438 if (!new)
65439 goto out_err;
65440
65441+#ifdef CONFIG_PAX_SEGMEXEC
65442+ if (vma_m) {
65443+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65444+ if (!new_m) {
65445+ kmem_cache_free(vm_area_cachep, new);
65446+ goto out_err;
65447+ }
65448+ }
65449+#endif
65450+
65451 /* most fields are the same, copy all, and then fixup */
65452 *new = *vma;
65453
65454@@ -1961,6 +2290,22 @@ static int __split_vma(struct mm_struct
65455 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
65456 }
65457
65458+#ifdef CONFIG_PAX_SEGMEXEC
65459+ if (vma_m) {
65460+ *new_m = *vma_m;
65461+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
65462+ new_m->vm_mirror = new;
65463+ new->vm_mirror = new_m;
65464+
65465+ if (new_below)
65466+ new_m->vm_end = addr_m;
65467+ else {
65468+ new_m->vm_start = addr_m;
65469+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
65470+ }
65471+ }
65472+#endif
65473+
65474 pol = mpol_dup(vma_policy(vma));
65475 if (IS_ERR(pol)) {
65476 err = PTR_ERR(pol);
65477@@ -1986,6 +2331,42 @@ static int __split_vma(struct mm_struct
65478 else
65479 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
65480
65481+#ifdef CONFIG_PAX_SEGMEXEC
65482+ if (!err && vma_m) {
65483+ if (anon_vma_clone(new_m, vma_m))
65484+ goto out_free_mpol;
65485+
65486+ mpol_get(pol);
65487+ vma_set_policy(new_m, pol);
65488+
65489+ if (new_m->vm_file) {
65490+ get_file(new_m->vm_file);
65491+ if (vma_m->vm_flags & VM_EXECUTABLE)
65492+ added_exe_file_vma(mm);
65493+ }
65494+
65495+ if (new_m->vm_ops && new_m->vm_ops->open)
65496+ new_m->vm_ops->open(new_m);
65497+
65498+ if (new_below)
65499+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
65500+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
65501+ else
65502+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
65503+
65504+ if (err) {
65505+ if (new_m->vm_ops && new_m->vm_ops->close)
65506+ new_m->vm_ops->close(new_m);
65507+ if (new_m->vm_file) {
65508+ if (vma_m->vm_flags & VM_EXECUTABLE)
65509+ removed_exe_file_vma(mm);
65510+ fput(new_m->vm_file);
65511+ }
65512+ mpol_put(pol);
65513+ }
65514+ }
65515+#endif
65516+
65517 /* Success. */
65518 if (!err)
65519 return 0;
65520@@ -1998,10 +2379,18 @@ static int __split_vma(struct mm_struct
65521 removed_exe_file_vma(mm);
65522 fput(new->vm_file);
65523 }
65524- unlink_anon_vmas(new);
65525 out_free_mpol:
65526 mpol_put(pol);
65527 out_free_vma:
65528+
65529+#ifdef CONFIG_PAX_SEGMEXEC
65530+ if (new_m) {
65531+ unlink_anon_vmas(new_m);
65532+ kmem_cache_free(vm_area_cachep, new_m);
65533+ }
65534+#endif
65535+
65536+ unlink_anon_vmas(new);
65537 kmem_cache_free(vm_area_cachep, new);
65538 out_err:
65539 return err;
65540@@ -2014,6 +2403,15 @@ static int __split_vma(struct mm_struct
65541 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
65542 unsigned long addr, int new_below)
65543 {
65544+
65545+#ifdef CONFIG_PAX_SEGMEXEC
65546+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
65547+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
65548+ if (mm->map_count >= sysctl_max_map_count-1)
65549+ return -ENOMEM;
65550+ } else
65551+#endif
65552+
65553 if (mm->map_count >= sysctl_max_map_count)
65554 return -ENOMEM;
65555
65556@@ -2025,11 +2423,30 @@ int split_vma(struct mm_struct *mm, stru
65557 * work. This now handles partial unmappings.
65558 * Jeremy Fitzhardinge <jeremy@goop.org>
65559 */
65560+#ifdef CONFIG_PAX_SEGMEXEC
65561 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65562 {
65563+ int ret = __do_munmap(mm, start, len);
65564+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
65565+ return ret;
65566+
65567+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
65568+}
65569+
65570+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65571+#else
65572+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65573+#endif
65574+{
65575 unsigned long end;
65576 struct vm_area_struct *vma, *prev, *last;
65577
65578+ /*
65579+ * mm->mmap_sem is required to protect against another thread
65580+ * changing the mappings in case we sleep.
65581+ */
65582+ verify_mm_writelocked(mm);
65583+
65584 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
65585 return -EINVAL;
65586
65587@@ -2104,6 +2521,8 @@ int do_munmap(struct mm_struct *mm, unsi
65588 /* Fix up all other VM information */
65589 remove_vma_list(mm, vma);
65590
65591+ track_exec_limit(mm, start, end, 0UL);
65592+
65593 return 0;
65594 }
65595
65596@@ -2116,22 +2535,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
65597
65598 profile_munmap(addr);
65599
65600+#ifdef CONFIG_PAX_SEGMEXEC
65601+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
65602+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
65603+ return -EINVAL;
65604+#endif
65605+
65606 down_write(&mm->mmap_sem);
65607 ret = do_munmap(mm, addr, len);
65608 up_write(&mm->mmap_sem);
65609 return ret;
65610 }
65611
65612-static inline void verify_mm_writelocked(struct mm_struct *mm)
65613-{
65614-#ifdef CONFIG_DEBUG_VM
65615- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
65616- WARN_ON(1);
65617- up_read(&mm->mmap_sem);
65618- }
65619-#endif
65620-}
65621-
65622 /*
65623 * this is really a simplified "do_mmap". it only handles
65624 * anonymous maps. eventually we may be able to do some
65625@@ -2145,6 +2560,7 @@ unsigned long do_brk(unsigned long addr,
65626 struct rb_node ** rb_link, * rb_parent;
65627 pgoff_t pgoff = addr >> PAGE_SHIFT;
65628 int error;
65629+ unsigned long charged;
65630
65631 len = PAGE_ALIGN(len);
65632 if (!len)
65633@@ -2156,16 +2572,30 @@ unsigned long do_brk(unsigned long addr,
65634
65635 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
65636
65637+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
65638+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65639+ flags &= ~VM_EXEC;
65640+
65641+#ifdef CONFIG_PAX_MPROTECT
65642+ if (mm->pax_flags & MF_PAX_MPROTECT)
65643+ flags &= ~VM_MAYEXEC;
65644+#endif
65645+
65646+ }
65647+#endif
65648+
65649 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
65650 if (error & ~PAGE_MASK)
65651 return error;
65652
65653+ charged = len >> PAGE_SHIFT;
65654+
65655 /*
65656 * mlock MCL_FUTURE?
65657 */
65658 if (mm->def_flags & VM_LOCKED) {
65659 unsigned long locked, lock_limit;
65660- locked = len >> PAGE_SHIFT;
65661+ locked = charged;
65662 locked += mm->locked_vm;
65663 lock_limit = rlimit(RLIMIT_MEMLOCK);
65664 lock_limit >>= PAGE_SHIFT;
65665@@ -2182,22 +2612,22 @@ unsigned long do_brk(unsigned long addr,
65666 /*
65667 * Clear old maps. this also does some error checking for us
65668 */
65669- munmap_back:
65670 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
65671 if (vma && vma->vm_start < addr + len) {
65672 if (do_munmap(mm, addr, len))
65673 return -ENOMEM;
65674- goto munmap_back;
65675+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
65676+ BUG_ON(vma && vma->vm_start < addr + len);
65677 }
65678
65679 /* Check against address space limits *after* clearing old maps... */
65680- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
65681+ if (!may_expand_vm(mm, charged))
65682 return -ENOMEM;
65683
65684 if (mm->map_count > sysctl_max_map_count)
65685 return -ENOMEM;
65686
65687- if (security_vm_enough_memory(len >> PAGE_SHIFT))
65688+ if (security_vm_enough_memory(charged))
65689 return -ENOMEM;
65690
65691 /* Can we just expand an old private anonymous mapping? */
65692@@ -2211,7 +2641,7 @@ unsigned long do_brk(unsigned long addr,
65693 */
65694 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65695 if (!vma) {
65696- vm_unacct_memory(len >> PAGE_SHIFT);
65697+ vm_unacct_memory(charged);
65698 return -ENOMEM;
65699 }
65700
65701@@ -2225,11 +2655,12 @@ unsigned long do_brk(unsigned long addr,
65702 vma_link(mm, vma, prev, rb_link, rb_parent);
65703 out:
65704 perf_event_mmap(vma);
65705- mm->total_vm += len >> PAGE_SHIFT;
65706+ mm->total_vm += charged;
65707 if (flags & VM_LOCKED) {
65708 if (!mlock_vma_pages_range(vma, addr, addr + len))
65709- mm->locked_vm += (len >> PAGE_SHIFT);
65710+ mm->locked_vm += charged;
65711 }
65712+ track_exec_limit(mm, addr, addr + len, flags);
65713 return addr;
65714 }
65715
65716@@ -2276,8 +2707,10 @@ void exit_mmap(struct mm_struct *mm)
65717 * Walk the list again, actually closing and freeing it,
65718 * with preemption enabled, without holding any MM locks.
65719 */
65720- while (vma)
65721+ while (vma) {
65722+ vma->vm_mirror = NULL;
65723 vma = remove_vma(vma);
65724+ }
65725
65726 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
65727 }
65728@@ -2291,6 +2724,13 @@ int insert_vm_struct(struct mm_struct *
65729 struct vm_area_struct * __vma, * prev;
65730 struct rb_node ** rb_link, * rb_parent;
65731
65732+#ifdef CONFIG_PAX_SEGMEXEC
65733+ struct vm_area_struct *vma_m = NULL;
65734+#endif
65735+
65736+ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
65737+ return -EPERM;
65738+
65739 /*
65740 * The vm_pgoff of a purely anonymous vma should be irrelevant
65741 * until its first write fault, when page's anon_vma and index
65742@@ -2313,7 +2753,22 @@ int insert_vm_struct(struct mm_struct *
65743 if ((vma->vm_flags & VM_ACCOUNT) &&
65744 security_vm_enough_memory_mm(mm, vma_pages(vma)))
65745 return -ENOMEM;
65746+
65747+#ifdef CONFIG_PAX_SEGMEXEC
65748+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
65749+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65750+ if (!vma_m)
65751+ return -ENOMEM;
65752+ }
65753+#endif
65754+
65755 vma_link(mm, vma, prev, rb_link, rb_parent);
65756+
65757+#ifdef CONFIG_PAX_SEGMEXEC
65758+ if (vma_m)
65759+ BUG_ON(pax_mirror_vma(vma_m, vma));
65760+#endif
65761+
65762 return 0;
65763 }
65764
65765@@ -2331,6 +2786,8 @@ struct vm_area_struct *copy_vma(struct v
65766 struct rb_node **rb_link, *rb_parent;
65767 struct mempolicy *pol;
65768
65769+ BUG_ON(vma->vm_mirror);
65770+
65771 /*
65772 * If anonymous vma has not yet been faulted, update new pgoff
65773 * to match new location, to increase its chance of merging.
65774@@ -2381,6 +2838,39 @@ struct vm_area_struct *copy_vma(struct v
65775 return NULL;
65776 }
65777
65778+#ifdef CONFIG_PAX_SEGMEXEC
65779+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
65780+{
65781+ struct vm_area_struct *prev_m;
65782+ struct rb_node **rb_link_m, *rb_parent_m;
65783+ struct mempolicy *pol_m;
65784+
65785+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
65786+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
65787+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
65788+ *vma_m = *vma;
65789+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
65790+ if (anon_vma_clone(vma_m, vma))
65791+ return -ENOMEM;
65792+ pol_m = vma_policy(vma_m);
65793+ mpol_get(pol_m);
65794+ vma_set_policy(vma_m, pol_m);
65795+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
65796+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
65797+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
65798+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
65799+ if (vma_m->vm_file)
65800+ get_file(vma_m->vm_file);
65801+ if (vma_m->vm_ops && vma_m->vm_ops->open)
65802+ vma_m->vm_ops->open(vma_m);
65803+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
65804+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
65805+ vma_m->vm_mirror = vma;
65806+ vma->vm_mirror = vma_m;
65807+ return 0;
65808+}
65809+#endif
65810+
65811 /*
65812 * Return true if the calling process may expand its vm space by the passed
65813 * number of pages
65814@@ -2391,7 +2881,7 @@ int may_expand_vm(struct mm_struct *mm,
65815 unsigned long lim;
65816
65817 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
65818-
65819+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
65820 if (cur + npages > lim)
65821 return 0;
65822 return 1;
65823@@ -2462,6 +2952,22 @@ int install_special_mapping(struct mm_st
65824 vma->vm_start = addr;
65825 vma->vm_end = addr + len;
65826
65827+#ifdef CONFIG_PAX_MPROTECT
65828+ if (mm->pax_flags & MF_PAX_MPROTECT) {
65829+#ifndef CONFIG_PAX_MPROTECT_COMPAT
65830+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
65831+ return -EPERM;
65832+ if (!(vm_flags & VM_EXEC))
65833+ vm_flags &= ~VM_MAYEXEC;
65834+#else
65835+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
65836+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
65837+#endif
65838+ else
65839+ vm_flags &= ~VM_MAYWRITE;
65840+ }
65841+#endif
65842+
65843 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
65844 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
65845
65846diff -urNp linux-3.0.4/mm/mprotect.c linux-3.0.4/mm/mprotect.c
65847--- linux-3.0.4/mm/mprotect.c 2011-07-21 22:17:23.000000000 -0400
65848+++ linux-3.0.4/mm/mprotect.c 2011-08-23 21:48:14.000000000 -0400
65849@@ -23,10 +23,16 @@
65850 #include <linux/mmu_notifier.h>
65851 #include <linux/migrate.h>
65852 #include <linux/perf_event.h>
65853+
65854+#ifdef CONFIG_PAX_MPROTECT
65855+#include <linux/elf.h>
65856+#endif
65857+
65858 #include <asm/uaccess.h>
65859 #include <asm/pgtable.h>
65860 #include <asm/cacheflush.h>
65861 #include <asm/tlbflush.h>
65862+#include <asm/mmu_context.h>
65863
65864 #ifndef pgprot_modify
65865 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
65866@@ -141,6 +147,48 @@ static void change_protection(struct vm_
65867 flush_tlb_range(vma, start, end);
65868 }
65869
65870+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
65871+/* called while holding the mmap semaphor for writing except stack expansion */
65872+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
65873+{
65874+ unsigned long oldlimit, newlimit = 0UL;
65875+
65876+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
65877+ return;
65878+
65879+ spin_lock(&mm->page_table_lock);
65880+ oldlimit = mm->context.user_cs_limit;
65881+ if ((prot & VM_EXEC) && oldlimit < end)
65882+ /* USER_CS limit moved up */
65883+ newlimit = end;
65884+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
65885+ /* USER_CS limit moved down */
65886+ newlimit = start;
65887+
65888+ if (newlimit) {
65889+ mm->context.user_cs_limit = newlimit;
65890+
65891+#ifdef CONFIG_SMP
65892+ wmb();
65893+ cpus_clear(mm->context.cpu_user_cs_mask);
65894+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
65895+#endif
65896+
65897+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
65898+ }
65899+ spin_unlock(&mm->page_table_lock);
65900+ if (newlimit == end) {
65901+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
65902+
65903+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
65904+ if (is_vm_hugetlb_page(vma))
65905+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
65906+ else
65907+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
65908+ }
65909+}
65910+#endif
65911+
65912 int
65913 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
65914 unsigned long start, unsigned long end, unsigned long newflags)
65915@@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vm
65916 int error;
65917 int dirty_accountable = 0;
65918
65919+#ifdef CONFIG_PAX_SEGMEXEC
65920+ struct vm_area_struct *vma_m = NULL;
65921+ unsigned long start_m, end_m;
65922+
65923+ start_m = start + SEGMEXEC_TASK_SIZE;
65924+ end_m = end + SEGMEXEC_TASK_SIZE;
65925+#endif
65926+
65927 if (newflags == oldflags) {
65928 *pprev = vma;
65929 return 0;
65930 }
65931
65932+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
65933+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
65934+
65935+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
65936+ return -ENOMEM;
65937+
65938+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
65939+ return -ENOMEM;
65940+ }
65941+
65942 /*
65943 * If we make a private mapping writable we increase our commit;
65944 * but (without finer accounting) cannot reduce our commit if we
65945@@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vm
65946 }
65947 }
65948
65949+#ifdef CONFIG_PAX_SEGMEXEC
65950+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
65951+ if (start != vma->vm_start) {
65952+ error = split_vma(mm, vma, start, 1);
65953+ if (error)
65954+ goto fail;
65955+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
65956+ *pprev = (*pprev)->vm_next;
65957+ }
65958+
65959+ if (end != vma->vm_end) {
65960+ error = split_vma(mm, vma, end, 0);
65961+ if (error)
65962+ goto fail;
65963+ }
65964+
65965+ if (pax_find_mirror_vma(vma)) {
65966+ error = __do_munmap(mm, start_m, end_m - start_m);
65967+ if (error)
65968+ goto fail;
65969+ } else {
65970+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65971+ if (!vma_m) {
65972+ error = -ENOMEM;
65973+ goto fail;
65974+ }
65975+ vma->vm_flags = newflags;
65976+ error = pax_mirror_vma(vma_m, vma);
65977+ if (error) {
65978+ vma->vm_flags = oldflags;
65979+ goto fail;
65980+ }
65981+ }
65982+ }
65983+#endif
65984+
65985 /*
65986 * First try to merge with previous and/or next vma.
65987 */
65988@@ -204,9 +306,21 @@ success:
65989 * vm_flags and vm_page_prot are protected by the mmap_sem
65990 * held in write mode.
65991 */
65992+
65993+#ifdef CONFIG_PAX_SEGMEXEC
65994+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
65995+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
65996+#endif
65997+
65998 vma->vm_flags = newflags;
65999+
66000+#ifdef CONFIG_PAX_MPROTECT
66001+ if (mm->binfmt && mm->binfmt->handle_mprotect)
66002+ mm->binfmt->handle_mprotect(vma, newflags);
66003+#endif
66004+
66005 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
66006- vm_get_page_prot(newflags));
66007+ vm_get_page_prot(vma->vm_flags));
66008
66009 if (vma_wants_writenotify(vma)) {
66010 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
66011@@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66012 end = start + len;
66013 if (end <= start)
66014 return -ENOMEM;
66015+
66016+#ifdef CONFIG_PAX_SEGMEXEC
66017+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
66018+ if (end > SEGMEXEC_TASK_SIZE)
66019+ return -EINVAL;
66020+ } else
66021+#endif
66022+
66023+ if (end > TASK_SIZE)
66024+ return -EINVAL;
66025+
66026 if (!arch_validate_prot(prot))
66027 return -EINVAL;
66028
66029@@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66030 /*
66031 * Does the application expect PROT_READ to imply PROT_EXEC:
66032 */
66033- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
66034+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
66035 prot |= PROT_EXEC;
66036
66037 vm_flags = calc_vm_prot_bits(prot);
66038@@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66039 if (start > vma->vm_start)
66040 prev = vma;
66041
66042+#ifdef CONFIG_PAX_MPROTECT
66043+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
66044+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
66045+#endif
66046+
66047 for (nstart = start ; ; ) {
66048 unsigned long newflags;
66049
66050@@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66051
66052 /* newflags >> 4 shift VM_MAY% in place of VM_% */
66053 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
66054+ if (prot & (PROT_WRITE | PROT_EXEC))
66055+ gr_log_rwxmprotect(vma->vm_file);
66056+
66057+ error = -EACCES;
66058+ goto out;
66059+ }
66060+
66061+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
66062 error = -EACCES;
66063 goto out;
66064 }
66065@@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66066 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
66067 if (error)
66068 goto out;
66069+
66070+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
66071+
66072 nstart = tmp;
66073
66074 if (nstart < prev->vm_end)
66075diff -urNp linux-3.0.4/mm/mremap.c linux-3.0.4/mm/mremap.c
66076--- linux-3.0.4/mm/mremap.c 2011-07-21 22:17:23.000000000 -0400
66077+++ linux-3.0.4/mm/mremap.c 2011-08-23 21:47:56.000000000 -0400
66078@@ -113,6 +113,12 @@ static void move_ptes(struct vm_area_str
66079 continue;
66080 pte = ptep_clear_flush(vma, old_addr, old_pte);
66081 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
66082+
66083+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
66084+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
66085+ pte = pte_exprotect(pte);
66086+#endif
66087+
66088 set_pte_at(mm, new_addr, new_pte, pte);
66089 }
66090
66091@@ -272,6 +278,11 @@ static struct vm_area_struct *vma_to_res
66092 if (is_vm_hugetlb_page(vma))
66093 goto Einval;
66094
66095+#ifdef CONFIG_PAX_SEGMEXEC
66096+ if (pax_find_mirror_vma(vma))
66097+ goto Einval;
66098+#endif
66099+
66100 /* We can't remap across vm area boundaries */
66101 if (old_len > vma->vm_end - addr)
66102 goto Efault;
66103@@ -328,20 +339,25 @@ static unsigned long mremap_to(unsigned
66104 unsigned long ret = -EINVAL;
66105 unsigned long charged = 0;
66106 unsigned long map_flags;
66107+ unsigned long pax_task_size = TASK_SIZE;
66108
66109 if (new_addr & ~PAGE_MASK)
66110 goto out;
66111
66112- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
66113+#ifdef CONFIG_PAX_SEGMEXEC
66114+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
66115+ pax_task_size = SEGMEXEC_TASK_SIZE;
66116+#endif
66117+
66118+ pax_task_size -= PAGE_SIZE;
66119+
66120+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
66121 goto out;
66122
66123 /* Check if the location we're moving into overlaps the
66124 * old location at all, and fail if it does.
66125 */
66126- if ((new_addr <= addr) && (new_addr+new_len) > addr)
66127- goto out;
66128-
66129- if ((addr <= new_addr) && (addr+old_len) > new_addr)
66130+ if (addr + old_len > new_addr && new_addr + new_len > addr)
66131 goto out;
66132
66133 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
66134@@ -413,6 +429,7 @@ unsigned long do_mremap(unsigned long ad
66135 struct vm_area_struct *vma;
66136 unsigned long ret = -EINVAL;
66137 unsigned long charged = 0;
66138+ unsigned long pax_task_size = TASK_SIZE;
66139
66140 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
66141 goto out;
66142@@ -431,6 +448,17 @@ unsigned long do_mremap(unsigned long ad
66143 if (!new_len)
66144 goto out;
66145
66146+#ifdef CONFIG_PAX_SEGMEXEC
66147+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
66148+ pax_task_size = SEGMEXEC_TASK_SIZE;
66149+#endif
66150+
66151+ pax_task_size -= PAGE_SIZE;
66152+
66153+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
66154+ old_len > pax_task_size || addr > pax_task_size-old_len)
66155+ goto out;
66156+
66157 if (flags & MREMAP_FIXED) {
66158 if (flags & MREMAP_MAYMOVE)
66159 ret = mremap_to(addr, old_len, new_addr, new_len);
66160@@ -480,6 +508,7 @@ unsigned long do_mremap(unsigned long ad
66161 addr + new_len);
66162 }
66163 ret = addr;
66164+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
66165 goto out;
66166 }
66167 }
66168@@ -506,7 +535,13 @@ unsigned long do_mremap(unsigned long ad
66169 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
66170 if (ret)
66171 goto out;
66172+
66173+ map_flags = vma->vm_flags;
66174 ret = move_vma(vma, addr, old_len, new_len, new_addr);
66175+ if (!(ret & ~PAGE_MASK)) {
66176+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
66177+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
66178+ }
66179 }
66180 out:
66181 if (ret & ~PAGE_MASK)
66182diff -urNp linux-3.0.4/mm/nobootmem.c linux-3.0.4/mm/nobootmem.c
66183--- linux-3.0.4/mm/nobootmem.c 2011-07-21 22:17:23.000000000 -0400
66184+++ linux-3.0.4/mm/nobootmem.c 2011-08-23 21:47:56.000000000 -0400
66185@@ -110,19 +110,30 @@ static void __init __free_pages_memory(u
66186 unsigned long __init free_all_memory_core_early(int nodeid)
66187 {
66188 int i;
66189- u64 start, end;
66190+ u64 start, end, startrange, endrange;
66191 unsigned long count = 0;
66192- struct range *range = NULL;
66193+ struct range *range = NULL, rangerange = { 0, 0 };
66194 int nr_range;
66195
66196 nr_range = get_free_all_memory_range(&range, nodeid);
66197+ startrange = __pa(range) >> PAGE_SHIFT;
66198+ endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
66199
66200 for (i = 0; i < nr_range; i++) {
66201 start = range[i].start;
66202 end = range[i].end;
66203+ if (start <= endrange && startrange < end) {
66204+ BUG_ON(rangerange.start | rangerange.end);
66205+ rangerange = range[i];
66206+ continue;
66207+ }
66208 count += end - start;
66209 __free_pages_memory(start, end);
66210 }
66211+ start = rangerange.start;
66212+ end = rangerange.end;
66213+ count += end - start;
66214+ __free_pages_memory(start, end);
66215
66216 return count;
66217 }
66218diff -urNp linux-3.0.4/mm/nommu.c linux-3.0.4/mm/nommu.c
66219--- linux-3.0.4/mm/nommu.c 2011-07-21 22:17:23.000000000 -0400
66220+++ linux-3.0.4/mm/nommu.c 2011-08-23 21:47:56.000000000 -0400
66221@@ -63,7 +63,6 @@ int sysctl_overcommit_memory = OVERCOMMI
66222 int sysctl_overcommit_ratio = 50; /* default is 50% */
66223 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
66224 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
66225-int heap_stack_gap = 0;
66226
66227 atomic_long_t mmap_pages_allocated;
66228
66229@@ -826,15 +825,6 @@ struct vm_area_struct *find_vma(struct m
66230 EXPORT_SYMBOL(find_vma);
66231
66232 /*
66233- * find a VMA
66234- * - we don't extend stack VMAs under NOMMU conditions
66235- */
66236-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
66237-{
66238- return find_vma(mm, addr);
66239-}
66240-
66241-/*
66242 * expand a stack to a given address
66243 * - not supported under NOMMU conditions
66244 */
66245@@ -1554,6 +1544,7 @@ int split_vma(struct mm_struct *mm, stru
66246
66247 /* most fields are the same, copy all, and then fixup */
66248 *new = *vma;
66249+ INIT_LIST_HEAD(&new->anon_vma_chain);
66250 *region = *vma->vm_region;
66251 new->vm_region = region;
66252
66253diff -urNp linux-3.0.4/mm/page_alloc.c linux-3.0.4/mm/page_alloc.c
66254--- linux-3.0.4/mm/page_alloc.c 2011-07-21 22:17:23.000000000 -0400
66255+++ linux-3.0.4/mm/page_alloc.c 2011-08-23 21:48:14.000000000 -0400
66256@@ -340,7 +340,7 @@ out:
66257 * This usage means that zero-order pages may not be compound.
66258 */
66259
66260-static void free_compound_page(struct page *page)
66261+void free_compound_page(struct page *page)
66262 {
66263 __free_pages_ok(page, compound_order(page));
66264 }
66265@@ -653,6 +653,10 @@ static bool free_pages_prepare(struct pa
66266 int i;
66267 int bad = 0;
66268
66269+#ifdef CONFIG_PAX_MEMORY_SANITIZE
66270+ unsigned long index = 1UL << order;
66271+#endif
66272+
66273 trace_mm_page_free_direct(page, order);
66274 kmemcheck_free_shadow(page, order);
66275
66276@@ -668,6 +672,12 @@ static bool free_pages_prepare(struct pa
66277 debug_check_no_obj_freed(page_address(page),
66278 PAGE_SIZE << order);
66279 }
66280+
66281+#ifdef CONFIG_PAX_MEMORY_SANITIZE
66282+ for (; index; --index)
66283+ sanitize_highpage(page + index - 1);
66284+#endif
66285+
66286 arch_free_page(page, order);
66287 kernel_map_pages(page, 1 << order, 0);
66288
66289@@ -783,8 +793,10 @@ static int prep_new_page(struct page *pa
66290 arch_alloc_page(page, order);
66291 kernel_map_pages(page, 1 << order, 1);
66292
66293+#ifndef CONFIG_PAX_MEMORY_SANITIZE
66294 if (gfp_flags & __GFP_ZERO)
66295 prep_zero_page(page, order, gfp_flags);
66296+#endif
66297
66298 if (order && (gfp_flags & __GFP_COMP))
66299 prep_compound_page(page, order);
66300@@ -2525,6 +2537,8 @@ void show_free_areas(unsigned int filter
66301 int cpu;
66302 struct zone *zone;
66303
66304+ pax_track_stack();
66305+
66306 for_each_populated_zone(zone) {
66307 if (skip_free_areas_node(filter, zone_to_nid(zone)))
66308 continue;
66309diff -urNp linux-3.0.4/mm/percpu.c linux-3.0.4/mm/percpu.c
66310--- linux-3.0.4/mm/percpu.c 2011-07-21 22:17:23.000000000 -0400
66311+++ linux-3.0.4/mm/percpu.c 2011-08-23 21:47:56.000000000 -0400
66312@@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu
66313 static unsigned int pcpu_last_unit_cpu __read_mostly;
66314
66315 /* the address of the first chunk which starts with the kernel static area */
66316-void *pcpu_base_addr __read_mostly;
66317+void *pcpu_base_addr __read_only;
66318 EXPORT_SYMBOL_GPL(pcpu_base_addr);
66319
66320 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
66321diff -urNp linux-3.0.4/mm/rmap.c linux-3.0.4/mm/rmap.c
66322--- linux-3.0.4/mm/rmap.c 2011-07-21 22:17:23.000000000 -0400
66323+++ linux-3.0.4/mm/rmap.c 2011-08-23 21:47:56.000000000 -0400
66324@@ -153,6 +153,10 @@ int anon_vma_prepare(struct vm_area_stru
66325 struct anon_vma *anon_vma = vma->anon_vma;
66326 struct anon_vma_chain *avc;
66327
66328+#ifdef CONFIG_PAX_SEGMEXEC
66329+ struct anon_vma_chain *avc_m = NULL;
66330+#endif
66331+
66332 might_sleep();
66333 if (unlikely(!anon_vma)) {
66334 struct mm_struct *mm = vma->vm_mm;
66335@@ -162,6 +166,12 @@ int anon_vma_prepare(struct vm_area_stru
66336 if (!avc)
66337 goto out_enomem;
66338
66339+#ifdef CONFIG_PAX_SEGMEXEC
66340+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
66341+ if (!avc_m)
66342+ goto out_enomem_free_avc;
66343+#endif
66344+
66345 anon_vma = find_mergeable_anon_vma(vma);
66346 allocated = NULL;
66347 if (!anon_vma) {
66348@@ -175,6 +185,21 @@ int anon_vma_prepare(struct vm_area_stru
66349 /* page_table_lock to protect against threads */
66350 spin_lock(&mm->page_table_lock);
66351 if (likely(!vma->anon_vma)) {
66352+
66353+#ifdef CONFIG_PAX_SEGMEXEC
66354+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
66355+
66356+ if (vma_m) {
66357+ BUG_ON(vma_m->anon_vma);
66358+ vma_m->anon_vma = anon_vma;
66359+ avc_m->anon_vma = anon_vma;
66360+ avc_m->vma = vma;
66361+ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
66362+ list_add(&avc_m->same_anon_vma, &anon_vma->head);
66363+ avc_m = NULL;
66364+ }
66365+#endif
66366+
66367 vma->anon_vma = anon_vma;
66368 avc->anon_vma = anon_vma;
66369 avc->vma = vma;
66370@@ -188,12 +213,24 @@ int anon_vma_prepare(struct vm_area_stru
66371
66372 if (unlikely(allocated))
66373 put_anon_vma(allocated);
66374+
66375+#ifdef CONFIG_PAX_SEGMEXEC
66376+ if (unlikely(avc_m))
66377+ anon_vma_chain_free(avc_m);
66378+#endif
66379+
66380 if (unlikely(avc))
66381 anon_vma_chain_free(avc);
66382 }
66383 return 0;
66384
66385 out_enomem_free_avc:
66386+
66387+#ifdef CONFIG_PAX_SEGMEXEC
66388+ if (avc_m)
66389+ anon_vma_chain_free(avc_m);
66390+#endif
66391+
66392 anon_vma_chain_free(avc);
66393 out_enomem:
66394 return -ENOMEM;
66395@@ -244,7 +281,7 @@ static void anon_vma_chain_link(struct v
66396 * Attach the anon_vmas from src to dst.
66397 * Returns 0 on success, -ENOMEM on failure.
66398 */
66399-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
66400+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
66401 {
66402 struct anon_vma_chain *avc, *pavc;
66403 struct anon_vma *root = NULL;
66404@@ -277,7 +314,7 @@ int anon_vma_clone(struct vm_area_struct
66405 * the corresponding VMA in the parent process is attached to.
66406 * Returns 0 on success, non-zero on failure.
66407 */
66408-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
66409+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
66410 {
66411 struct anon_vma_chain *avc;
66412 struct anon_vma *anon_vma;
66413diff -urNp linux-3.0.4/mm/shmem.c linux-3.0.4/mm/shmem.c
66414--- linux-3.0.4/mm/shmem.c 2011-07-21 22:17:23.000000000 -0400
66415+++ linux-3.0.4/mm/shmem.c 2011-08-23 21:48:14.000000000 -0400
66416@@ -31,7 +31,7 @@
66417 #include <linux/percpu_counter.h>
66418 #include <linux/swap.h>
66419
66420-static struct vfsmount *shm_mnt;
66421+struct vfsmount *shm_mnt;
66422
66423 #ifdef CONFIG_SHMEM
66424 /*
66425@@ -1101,6 +1101,8 @@ static int shmem_writepage(struct page *
66426 goto unlock;
66427 }
66428 entry = shmem_swp_entry(info, index, NULL);
66429+ if (!entry)
66430+ goto unlock;
66431 if (entry->val) {
66432 /*
66433 * The more uptodate page coming down from a stacked
66434@@ -1172,6 +1174,8 @@ static struct page *shmem_swapin(swp_ent
66435 struct vm_area_struct pvma;
66436 struct page *page;
66437
66438+ pax_track_stack();
66439+
66440 spol = mpol_cond_copy(&mpol,
66441 mpol_shared_policy_lookup(&info->policy, idx));
66442
66443@@ -2568,8 +2572,7 @@ int shmem_fill_super(struct super_block
66444 int err = -ENOMEM;
66445
66446 /* Round up to L1_CACHE_BYTES to resist false sharing */
66447- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
66448- L1_CACHE_BYTES), GFP_KERNEL);
66449+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
66450 if (!sbinfo)
66451 return -ENOMEM;
66452
66453diff -urNp linux-3.0.4/mm/slab.c linux-3.0.4/mm/slab.c
66454--- linux-3.0.4/mm/slab.c 2011-07-21 22:17:23.000000000 -0400
66455+++ linux-3.0.4/mm/slab.c 2011-08-23 21:48:14.000000000 -0400
66456@@ -151,7 +151,7 @@
66457
66458 /* Legal flag mask for kmem_cache_create(). */
66459 #if DEBUG
66460-# define CREATE_MASK (SLAB_RED_ZONE | \
66461+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
66462 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
66463 SLAB_CACHE_DMA | \
66464 SLAB_STORE_USER | \
66465@@ -159,7 +159,7 @@
66466 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
66467 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
66468 #else
66469-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
66470+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
66471 SLAB_CACHE_DMA | \
66472 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
66473 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
66474@@ -288,7 +288,7 @@ struct kmem_list3 {
66475 * Need this for bootstrapping a per node allocator.
66476 */
66477 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
66478-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
66479+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
66480 #define CACHE_CACHE 0
66481 #define SIZE_AC MAX_NUMNODES
66482 #define SIZE_L3 (2 * MAX_NUMNODES)
66483@@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_
66484 if ((x)->max_freeable < i) \
66485 (x)->max_freeable = i; \
66486 } while (0)
66487-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
66488-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
66489-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
66490-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
66491+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
66492+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
66493+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
66494+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
66495 #else
66496 #define STATS_INC_ACTIVE(x) do { } while (0)
66497 #define STATS_DEC_ACTIVE(x) do { } while (0)
66498@@ -538,7 +538,7 @@ static inline void *index_to_obj(struct
66499 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
66500 */
66501 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
66502- const struct slab *slab, void *obj)
66503+ const struct slab *slab, const void *obj)
66504 {
66505 u32 offset = (obj - slab->s_mem);
66506 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
66507@@ -564,7 +564,7 @@ struct cache_names {
66508 static struct cache_names __initdata cache_names[] = {
66509 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
66510 #include <linux/kmalloc_sizes.h>
66511- {NULL,}
66512+ {NULL}
66513 #undef CACHE
66514 };
66515
66516@@ -1530,7 +1530,7 @@ void __init kmem_cache_init(void)
66517 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
66518 sizes[INDEX_AC].cs_size,
66519 ARCH_KMALLOC_MINALIGN,
66520- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
66521+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
66522 NULL);
66523
66524 if (INDEX_AC != INDEX_L3) {
66525@@ -1538,7 +1538,7 @@ void __init kmem_cache_init(void)
66526 kmem_cache_create(names[INDEX_L3].name,
66527 sizes[INDEX_L3].cs_size,
66528 ARCH_KMALLOC_MINALIGN,
66529- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
66530+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
66531 NULL);
66532 }
66533
66534@@ -1556,7 +1556,7 @@ void __init kmem_cache_init(void)
66535 sizes->cs_cachep = kmem_cache_create(names->name,
66536 sizes->cs_size,
66537 ARCH_KMALLOC_MINALIGN,
66538- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
66539+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
66540 NULL);
66541 }
66542 #ifdef CONFIG_ZONE_DMA
66543@@ -4272,10 +4272,10 @@ static int s_show(struct seq_file *m, vo
66544 }
66545 /* cpu stats */
66546 {
66547- unsigned long allochit = atomic_read(&cachep->allochit);
66548- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
66549- unsigned long freehit = atomic_read(&cachep->freehit);
66550- unsigned long freemiss = atomic_read(&cachep->freemiss);
66551+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
66552+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
66553+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
66554+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
66555
66556 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
66557 allochit, allocmiss, freehit, freemiss);
66558@@ -4532,15 +4532,66 @@ static const struct file_operations proc
66559
66560 static int __init slab_proc_init(void)
66561 {
66562- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
66563+ mode_t gr_mode = S_IRUGO;
66564+
66565+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66566+ gr_mode = S_IRUSR;
66567+#endif
66568+
66569+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
66570 #ifdef CONFIG_DEBUG_SLAB_LEAK
66571- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
66572+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
66573 #endif
66574 return 0;
66575 }
66576 module_init(slab_proc_init);
66577 #endif
66578
66579+void check_object_size(const void *ptr, unsigned long n, bool to)
66580+{
66581+
66582+#ifdef CONFIG_PAX_USERCOPY
66583+ struct page *page;
66584+ struct kmem_cache *cachep = NULL;
66585+ struct slab *slabp;
66586+ unsigned int objnr;
66587+ unsigned long offset;
66588+
66589+ if (!n)
66590+ return;
66591+
66592+ if (ZERO_OR_NULL_PTR(ptr))
66593+ goto report;
66594+
66595+ if (!virt_addr_valid(ptr))
66596+ return;
66597+
66598+ page = virt_to_head_page(ptr);
66599+
66600+ if (!PageSlab(page)) {
66601+ if (object_is_on_stack(ptr, n) == -1)
66602+ goto report;
66603+ return;
66604+ }
66605+
66606+ cachep = page_get_cache(page);
66607+ if (!(cachep->flags & SLAB_USERCOPY))
66608+ goto report;
66609+
66610+ slabp = page_get_slab(page);
66611+ objnr = obj_to_index(cachep, slabp, ptr);
66612+ BUG_ON(objnr >= cachep->num);
66613+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
66614+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
66615+ return;
66616+
66617+report:
66618+ pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
66619+#endif
66620+
66621+}
66622+EXPORT_SYMBOL(check_object_size);
66623+
66624 /**
66625 * ksize - get the actual amount of memory allocated for a given object
66626 * @objp: Pointer to the object
66627diff -urNp linux-3.0.4/mm/slob.c linux-3.0.4/mm/slob.c
66628--- linux-3.0.4/mm/slob.c 2011-07-21 22:17:23.000000000 -0400
66629+++ linux-3.0.4/mm/slob.c 2011-08-23 21:47:56.000000000 -0400
66630@@ -29,7 +29,7 @@
66631 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
66632 * alloc_pages() directly, allocating compound pages so the page order
66633 * does not have to be separately tracked, and also stores the exact
66634- * allocation size in page->private so that it can be used to accurately
66635+ * allocation size in slob_page->size so that it can be used to accurately
66636 * provide ksize(). These objects are detected in kfree() because slob_page()
66637 * is false for them.
66638 *
66639@@ -58,6 +58,7 @@
66640 */
66641
66642 #include <linux/kernel.h>
66643+#include <linux/sched.h>
66644 #include <linux/slab.h>
66645 #include <linux/mm.h>
66646 #include <linux/swap.h> /* struct reclaim_state */
66647@@ -102,7 +103,8 @@ struct slob_page {
66648 unsigned long flags; /* mandatory */
66649 atomic_t _count; /* mandatory */
66650 slobidx_t units; /* free units left in page */
66651- unsigned long pad[2];
66652+ unsigned long pad[1];
66653+ unsigned long size; /* size when >=PAGE_SIZE */
66654 slob_t *free; /* first free slob_t in page */
66655 struct list_head list; /* linked list of free pages */
66656 };
66657@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
66658 */
66659 static inline int is_slob_page(struct slob_page *sp)
66660 {
66661- return PageSlab((struct page *)sp);
66662+ return PageSlab((struct page *)sp) && !sp->size;
66663 }
66664
66665 static inline void set_slob_page(struct slob_page *sp)
66666@@ -150,7 +152,7 @@ static inline void clear_slob_page(struc
66667
66668 static inline struct slob_page *slob_page(const void *addr)
66669 {
66670- return (struct slob_page *)virt_to_page(addr);
66671+ return (struct slob_page *)virt_to_head_page(addr);
66672 }
66673
66674 /*
66675@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_
66676 /*
66677 * Return the size of a slob block.
66678 */
66679-static slobidx_t slob_units(slob_t *s)
66680+static slobidx_t slob_units(const slob_t *s)
66681 {
66682 if (s->units > 0)
66683 return s->units;
66684@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
66685 /*
66686 * Return the next free slob block pointer after this one.
66687 */
66688-static slob_t *slob_next(slob_t *s)
66689+static slob_t *slob_next(const slob_t *s)
66690 {
66691 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
66692 slobidx_t next;
66693@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
66694 /*
66695 * Returns true if s is the last free block in its page.
66696 */
66697-static int slob_last(slob_t *s)
66698+static int slob_last(const slob_t *s)
66699 {
66700 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
66701 }
66702@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, i
66703 if (!page)
66704 return NULL;
66705
66706+ set_slob_page(page);
66707 return page_address(page);
66708 }
66709
66710@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp
66711 if (!b)
66712 return NULL;
66713 sp = slob_page(b);
66714- set_slob_page(sp);
66715
66716 spin_lock_irqsave(&slob_lock, flags);
66717 sp->units = SLOB_UNITS(PAGE_SIZE);
66718 sp->free = b;
66719+ sp->size = 0;
66720 INIT_LIST_HEAD(&sp->list);
66721 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
66722 set_slob_page_free(sp, slob_list);
66723@@ -476,10 +479,9 @@ out:
66724 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
66725 */
66726
66727-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
66728+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
66729 {
66730- unsigned int *m;
66731- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66732+ slob_t *m;
66733 void *ret;
66734
66735 lockdep_trace_alloc(gfp);
66736@@ -492,7 +494,10 @@ void *__kmalloc_node(size_t size, gfp_t
66737
66738 if (!m)
66739 return NULL;
66740- *m = size;
66741+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
66742+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
66743+ m[0].units = size;
66744+ m[1].units = align;
66745 ret = (void *)m + align;
66746
66747 trace_kmalloc_node(_RET_IP_, ret,
66748@@ -504,16 +509,25 @@ void *__kmalloc_node(size_t size, gfp_t
66749 gfp |= __GFP_COMP;
66750 ret = slob_new_pages(gfp, order, node);
66751 if (ret) {
66752- struct page *page;
66753- page = virt_to_page(ret);
66754- page->private = size;
66755+ struct slob_page *sp;
66756+ sp = slob_page(ret);
66757+ sp->size = size;
66758 }
66759
66760 trace_kmalloc_node(_RET_IP_, ret,
66761 size, PAGE_SIZE << order, gfp, node);
66762 }
66763
66764- kmemleak_alloc(ret, size, 1, gfp);
66765+ return ret;
66766+}
66767+
66768+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
66769+{
66770+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66771+ void *ret = __kmalloc_node_align(size, gfp, node, align);
66772+
66773+ if (!ZERO_OR_NULL_PTR(ret))
66774+ kmemleak_alloc(ret, size, 1, gfp);
66775 return ret;
66776 }
66777 EXPORT_SYMBOL(__kmalloc_node);
66778@@ -531,13 +545,88 @@ void kfree(const void *block)
66779 sp = slob_page(block);
66780 if (is_slob_page(sp)) {
66781 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66782- unsigned int *m = (unsigned int *)(block - align);
66783- slob_free(m, *m + align);
66784- } else
66785+ slob_t *m = (slob_t *)(block - align);
66786+ slob_free(m, m[0].units + align);
66787+ } else {
66788+ clear_slob_page(sp);
66789+ free_slob_page(sp);
66790+ sp->size = 0;
66791 put_page(&sp->page);
66792+ }
66793 }
66794 EXPORT_SYMBOL(kfree);
66795
66796+void check_object_size(const void *ptr, unsigned long n, bool to)
66797+{
66798+
66799+#ifdef CONFIG_PAX_USERCOPY
66800+ struct slob_page *sp;
66801+ const slob_t *free;
66802+ const void *base;
66803+ unsigned long flags;
66804+
66805+ if (!n)
66806+ return;
66807+
66808+ if (ZERO_OR_NULL_PTR(ptr))
66809+ goto report;
66810+
66811+ if (!virt_addr_valid(ptr))
66812+ return;
66813+
66814+ sp = slob_page(ptr);
66815+ if (!PageSlab((struct page*)sp)) {
66816+ if (object_is_on_stack(ptr, n) == -1)
66817+ goto report;
66818+ return;
66819+ }
66820+
66821+ if (sp->size) {
66822+ base = page_address(&sp->page);
66823+ if (base <= ptr && n <= sp->size - (ptr - base))
66824+ return;
66825+ goto report;
66826+ }
66827+
66828+ /* some tricky double walking to find the chunk */
66829+ spin_lock_irqsave(&slob_lock, flags);
66830+ base = (void *)((unsigned long)ptr & PAGE_MASK);
66831+ free = sp->free;
66832+
66833+ while (!slob_last(free) && (void *)free <= ptr) {
66834+ base = free + slob_units(free);
66835+ free = slob_next(free);
66836+ }
66837+
66838+ while (base < (void *)free) {
66839+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
66840+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
66841+ int offset;
66842+
66843+ if (ptr < base + align)
66844+ break;
66845+
66846+ offset = ptr - base - align;
66847+ if (offset >= m) {
66848+ base += size;
66849+ continue;
66850+ }
66851+
66852+ if (n > m - offset)
66853+ break;
66854+
66855+ spin_unlock_irqrestore(&slob_lock, flags);
66856+ return;
66857+ }
66858+
66859+ spin_unlock_irqrestore(&slob_lock, flags);
66860+report:
66861+ pax_report_usercopy(ptr, n, to, NULL);
66862+#endif
66863+
66864+}
66865+EXPORT_SYMBOL(check_object_size);
66866+
66867 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
66868 size_t ksize(const void *block)
66869 {
66870@@ -550,10 +639,10 @@ size_t ksize(const void *block)
66871 sp = slob_page(block);
66872 if (is_slob_page(sp)) {
66873 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66874- unsigned int *m = (unsigned int *)(block - align);
66875- return SLOB_UNITS(*m) * SLOB_UNIT;
66876+ slob_t *m = (slob_t *)(block - align);
66877+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
66878 } else
66879- return sp->page.private;
66880+ return sp->size;
66881 }
66882 EXPORT_SYMBOL(ksize);
66883
66884@@ -569,8 +658,13 @@ struct kmem_cache *kmem_cache_create(con
66885 {
66886 struct kmem_cache *c;
66887
66888+#ifdef CONFIG_PAX_USERCOPY
66889+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
66890+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
66891+#else
66892 c = slob_alloc(sizeof(struct kmem_cache),
66893 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
66894+#endif
66895
66896 if (c) {
66897 c->name = name;
66898@@ -608,17 +702,25 @@ void *kmem_cache_alloc_node(struct kmem_
66899 {
66900 void *b;
66901
66902+#ifdef CONFIG_PAX_USERCOPY
66903+ b = __kmalloc_node_align(c->size, flags, node, c->align);
66904+#else
66905 if (c->size < PAGE_SIZE) {
66906 b = slob_alloc(c->size, flags, c->align, node);
66907 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
66908 SLOB_UNITS(c->size) * SLOB_UNIT,
66909 flags, node);
66910 } else {
66911+ struct slob_page *sp;
66912+
66913 b = slob_new_pages(flags, get_order(c->size), node);
66914+ sp = slob_page(b);
66915+ sp->size = c->size;
66916 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
66917 PAGE_SIZE << get_order(c->size),
66918 flags, node);
66919 }
66920+#endif
66921
66922 if (c->ctor)
66923 c->ctor(b);
66924@@ -630,10 +732,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
66925
66926 static void __kmem_cache_free(void *b, int size)
66927 {
66928- if (size < PAGE_SIZE)
66929+ struct slob_page *sp = slob_page(b);
66930+
66931+ if (is_slob_page(sp))
66932 slob_free(b, size);
66933- else
66934+ else {
66935+ clear_slob_page(sp);
66936+ free_slob_page(sp);
66937+ sp->size = 0;
66938 slob_free_pages(b, get_order(size));
66939+ }
66940 }
66941
66942 static void kmem_rcu_free(struct rcu_head *head)
66943@@ -646,17 +754,31 @@ static void kmem_rcu_free(struct rcu_hea
66944
66945 void kmem_cache_free(struct kmem_cache *c, void *b)
66946 {
66947+ int size = c->size;
66948+
66949+#ifdef CONFIG_PAX_USERCOPY
66950+ if (size + c->align < PAGE_SIZE) {
66951+ size += c->align;
66952+ b -= c->align;
66953+ }
66954+#endif
66955+
66956 kmemleak_free_recursive(b, c->flags);
66957 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
66958 struct slob_rcu *slob_rcu;
66959- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
66960- slob_rcu->size = c->size;
66961+ slob_rcu = b + (size - sizeof(struct slob_rcu));
66962+ slob_rcu->size = size;
66963 call_rcu(&slob_rcu->head, kmem_rcu_free);
66964 } else {
66965- __kmem_cache_free(b, c->size);
66966+ __kmem_cache_free(b, size);
66967 }
66968
66969+#ifdef CONFIG_PAX_USERCOPY
66970+ trace_kfree(_RET_IP_, b);
66971+#else
66972 trace_kmem_cache_free(_RET_IP_, b);
66973+#endif
66974+
66975 }
66976 EXPORT_SYMBOL(kmem_cache_free);
66977
66978diff -urNp linux-3.0.4/mm/slub.c linux-3.0.4/mm/slub.c
66979--- linux-3.0.4/mm/slub.c 2011-07-21 22:17:23.000000000 -0400
66980+++ linux-3.0.4/mm/slub.c 2011-08-23 21:48:14.000000000 -0400
66981@@ -442,7 +442,7 @@ static void print_track(const char *s, s
66982 if (!t->addr)
66983 return;
66984
66985- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
66986+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
66987 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
66988 }
66989
66990@@ -2137,6 +2137,8 @@ void kmem_cache_free(struct kmem_cache *
66991
66992 page = virt_to_head_page(x);
66993
66994+ BUG_ON(!PageSlab(page));
66995+
66996 slab_free(s, page, x, _RET_IP_);
66997
66998 trace_kmem_cache_free(_RET_IP_, x);
66999@@ -2170,7 +2172,7 @@ static int slub_min_objects;
67000 * Merge control. If this is set then no merging of slab caches will occur.
67001 * (Could be removed. This was introduced to pacify the merge skeptics.)
67002 */
67003-static int slub_nomerge;
67004+static int slub_nomerge = 1;
67005
67006 /*
67007 * Calculate the order of allocation given an slab object size.
67008@@ -2594,7 +2596,7 @@ static int kmem_cache_open(struct kmem_c
67009 * list to avoid pounding the page allocator excessively.
67010 */
67011 set_min_partial(s, ilog2(s->size));
67012- s->refcount = 1;
67013+ atomic_set(&s->refcount, 1);
67014 #ifdef CONFIG_NUMA
67015 s->remote_node_defrag_ratio = 1000;
67016 #endif
67017@@ -2699,8 +2701,7 @@ static inline int kmem_cache_close(struc
67018 void kmem_cache_destroy(struct kmem_cache *s)
67019 {
67020 down_write(&slub_lock);
67021- s->refcount--;
67022- if (!s->refcount) {
67023+ if (atomic_dec_and_test(&s->refcount)) {
67024 list_del(&s->list);
67025 if (kmem_cache_close(s)) {
67026 printk(KERN_ERR "SLUB %s: %s called for cache that "
67027@@ -2910,6 +2911,46 @@ void *__kmalloc_node(size_t size, gfp_t
67028 EXPORT_SYMBOL(__kmalloc_node);
67029 #endif
67030
67031+void check_object_size(const void *ptr, unsigned long n, bool to)
67032+{
67033+
67034+#ifdef CONFIG_PAX_USERCOPY
67035+ struct page *page;
67036+ struct kmem_cache *s = NULL;
67037+ unsigned long offset;
67038+
67039+ if (!n)
67040+ return;
67041+
67042+ if (ZERO_OR_NULL_PTR(ptr))
67043+ goto report;
67044+
67045+ if (!virt_addr_valid(ptr))
67046+ return;
67047+
67048+ page = virt_to_head_page(ptr);
67049+
67050+ if (!PageSlab(page)) {
67051+ if (object_is_on_stack(ptr, n) == -1)
67052+ goto report;
67053+ return;
67054+ }
67055+
67056+ s = page->slab;
67057+ if (!(s->flags & SLAB_USERCOPY))
67058+ goto report;
67059+
67060+ offset = (ptr - page_address(page)) % s->size;
67061+ if (offset <= s->objsize && n <= s->objsize - offset)
67062+ return;
67063+
67064+report:
67065+ pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
67066+#endif
67067+
67068+}
67069+EXPORT_SYMBOL(check_object_size);
67070+
67071 size_t ksize(const void *object)
67072 {
67073 struct page *page;
67074@@ -3154,7 +3195,7 @@ static void __init kmem_cache_bootstrap_
67075 int node;
67076
67077 list_add(&s->list, &slab_caches);
67078- s->refcount = -1;
67079+ atomic_set(&s->refcount, -1);
67080
67081 for_each_node_state(node, N_NORMAL_MEMORY) {
67082 struct kmem_cache_node *n = get_node(s, node);
67083@@ -3271,17 +3312,17 @@ void __init kmem_cache_init(void)
67084
67085 /* Caches that are not of the two-to-the-power-of size */
67086 if (KMALLOC_MIN_SIZE <= 32) {
67087- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
67088+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
67089 caches++;
67090 }
67091
67092 if (KMALLOC_MIN_SIZE <= 64) {
67093- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
67094+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
67095 caches++;
67096 }
67097
67098 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
67099- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
67100+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
67101 caches++;
67102 }
67103
67104@@ -3349,7 +3390,7 @@ static int slab_unmergeable(struct kmem_
67105 /*
67106 * We may have set a slab to be unmergeable during bootstrap.
67107 */
67108- if (s->refcount < 0)
67109+ if (atomic_read(&s->refcount) < 0)
67110 return 1;
67111
67112 return 0;
67113@@ -3408,7 +3449,7 @@ struct kmem_cache *kmem_cache_create(con
67114 down_write(&slub_lock);
67115 s = find_mergeable(size, align, flags, name, ctor);
67116 if (s) {
67117- s->refcount++;
67118+ atomic_inc(&s->refcount);
67119 /*
67120 * Adjust the object sizes so that we clear
67121 * the complete object on kzalloc.
67122@@ -3417,7 +3458,7 @@ struct kmem_cache *kmem_cache_create(con
67123 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
67124
67125 if (sysfs_slab_alias(s, name)) {
67126- s->refcount--;
67127+ atomic_dec(&s->refcount);
67128 goto err;
67129 }
67130 up_write(&slub_lock);
67131@@ -4150,7 +4191,7 @@ SLAB_ATTR_RO(ctor);
67132
67133 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
67134 {
67135- return sprintf(buf, "%d\n", s->refcount - 1);
67136+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
67137 }
67138 SLAB_ATTR_RO(aliases);
67139
67140@@ -4894,7 +4935,13 @@ static const struct file_operations proc
67141
67142 static int __init slab_proc_init(void)
67143 {
67144- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
67145+ mode_t gr_mode = S_IRUGO;
67146+
67147+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67148+ gr_mode = S_IRUSR;
67149+#endif
67150+
67151+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
67152 return 0;
67153 }
67154 module_init(slab_proc_init);
67155diff -urNp linux-3.0.4/mm/swap.c linux-3.0.4/mm/swap.c
67156--- linux-3.0.4/mm/swap.c 2011-07-21 22:17:23.000000000 -0400
67157+++ linux-3.0.4/mm/swap.c 2011-08-23 21:47:56.000000000 -0400
67158@@ -31,6 +31,7 @@
67159 #include <linux/backing-dev.h>
67160 #include <linux/memcontrol.h>
67161 #include <linux/gfp.h>
67162+#include <linux/hugetlb.h>
67163
67164 #include "internal.h"
67165
67166@@ -71,6 +72,8 @@ static void __put_compound_page(struct p
67167
67168 __page_cache_release(page);
67169 dtor = get_compound_page_dtor(page);
67170+ if (!PageHuge(page))
67171+ BUG_ON(dtor != free_compound_page);
67172 (*dtor)(page);
67173 }
67174
67175diff -urNp linux-3.0.4/mm/swapfile.c linux-3.0.4/mm/swapfile.c
67176--- linux-3.0.4/mm/swapfile.c 2011-07-21 22:17:23.000000000 -0400
67177+++ linux-3.0.4/mm/swapfile.c 2011-08-23 21:47:56.000000000 -0400
67178@@ -62,7 +62,7 @@ static DEFINE_MUTEX(swapon_mutex);
67179
67180 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
67181 /* Activity counter to indicate that a swapon or swapoff has occurred */
67182-static atomic_t proc_poll_event = ATOMIC_INIT(0);
67183+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
67184
67185 static inline unsigned char swap_count(unsigned char ent)
67186 {
67187@@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __us
67188 }
67189 filp_close(swap_file, NULL);
67190 err = 0;
67191- atomic_inc(&proc_poll_event);
67192+ atomic_inc_unchecked(&proc_poll_event);
67193 wake_up_interruptible(&proc_poll_wait);
67194
67195 out_dput:
67196@@ -1692,8 +1692,8 @@ static unsigned swaps_poll(struct file *
67197
67198 poll_wait(file, &proc_poll_wait, wait);
67199
67200- if (s->event != atomic_read(&proc_poll_event)) {
67201- s->event = atomic_read(&proc_poll_event);
67202+ if (s->event != atomic_read_unchecked(&proc_poll_event)) {
67203+ s->event = atomic_read_unchecked(&proc_poll_event);
67204 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
67205 }
67206
67207@@ -1799,7 +1799,7 @@ static int swaps_open(struct inode *inod
67208 }
67209
67210 s->seq.private = s;
67211- s->event = atomic_read(&proc_poll_event);
67212+ s->event = atomic_read_unchecked(&proc_poll_event);
67213 return ret;
67214 }
67215
67216@@ -2133,7 +2133,7 @@ SYSCALL_DEFINE2(swapon, const char __use
67217 (p->flags & SWP_DISCARDABLE) ? "D" : "");
67218
67219 mutex_unlock(&swapon_mutex);
67220- atomic_inc(&proc_poll_event);
67221+ atomic_inc_unchecked(&proc_poll_event);
67222 wake_up_interruptible(&proc_poll_wait);
67223
67224 if (S_ISREG(inode->i_mode))
67225diff -urNp linux-3.0.4/mm/util.c linux-3.0.4/mm/util.c
67226--- linux-3.0.4/mm/util.c 2011-07-21 22:17:23.000000000 -0400
67227+++ linux-3.0.4/mm/util.c 2011-08-23 21:47:56.000000000 -0400
67228@@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
67229 * allocated buffer. Use this if you don't want to free the buffer immediately
67230 * like, for example, with RCU.
67231 */
67232+#undef __krealloc
67233 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
67234 {
67235 void *ret;
67236@@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
67237 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
67238 * %NULL pointer, the object pointed to is freed.
67239 */
67240+#undef krealloc
67241 void *krealloc(const void *p, size_t new_size, gfp_t flags)
67242 {
67243 void *ret;
67244@@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *m
67245 void arch_pick_mmap_layout(struct mm_struct *mm)
67246 {
67247 mm->mmap_base = TASK_UNMAPPED_BASE;
67248+
67249+#ifdef CONFIG_PAX_RANDMMAP
67250+ if (mm->pax_flags & MF_PAX_RANDMMAP)
67251+ mm->mmap_base += mm->delta_mmap;
67252+#endif
67253+
67254 mm->get_unmapped_area = arch_get_unmapped_area;
67255 mm->unmap_area = arch_unmap_area;
67256 }
67257diff -urNp linux-3.0.4/mm/vmalloc.c linux-3.0.4/mm/vmalloc.c
67258--- linux-3.0.4/mm/vmalloc.c 2011-09-02 18:11:21.000000000 -0400
67259+++ linux-3.0.4/mm/vmalloc.c 2011-08-23 21:47:56.000000000 -0400
67260@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd,
67261
67262 pte = pte_offset_kernel(pmd, addr);
67263 do {
67264- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
67265- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
67266+
67267+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67268+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
67269+ BUG_ON(!pte_exec(*pte));
67270+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
67271+ continue;
67272+ }
67273+#endif
67274+
67275+ {
67276+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
67277+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
67278+ }
67279 } while (pte++, addr += PAGE_SIZE, addr != end);
67280 }
67281
67282@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, un
67283 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
67284 {
67285 pte_t *pte;
67286+ int ret = -ENOMEM;
67287
67288 /*
67289 * nr is a running index into the array which helps higher level
67290@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, un
67291 pte = pte_alloc_kernel(pmd, addr);
67292 if (!pte)
67293 return -ENOMEM;
67294+
67295+ pax_open_kernel();
67296 do {
67297 struct page *page = pages[*nr];
67298
67299- if (WARN_ON(!pte_none(*pte)))
67300- return -EBUSY;
67301- if (WARN_ON(!page))
67302- return -ENOMEM;
67303+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67304+ if (pgprot_val(prot) & _PAGE_NX)
67305+#endif
67306+
67307+ if (WARN_ON(!pte_none(*pte))) {
67308+ ret = -EBUSY;
67309+ goto out;
67310+ }
67311+ if (WARN_ON(!page)) {
67312+ ret = -ENOMEM;
67313+ goto out;
67314+ }
67315 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
67316 (*nr)++;
67317 } while (pte++, addr += PAGE_SIZE, addr != end);
67318- return 0;
67319+ ret = 0;
67320+out:
67321+ pax_close_kernel();
67322+ return ret;
67323 }
67324
67325 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
67326@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void
67327 * and fall back on vmalloc() if that fails. Others
67328 * just put it in the vmalloc space.
67329 */
67330-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
67331+#ifdef CONFIG_MODULES
67332+#ifdef MODULES_VADDR
67333 unsigned long addr = (unsigned long)x;
67334 if (addr >= MODULES_VADDR && addr < MODULES_END)
67335 return 1;
67336 #endif
67337+
67338+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67339+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
67340+ return 1;
67341+#endif
67342+
67343+#endif
67344+
67345 return is_vmalloc_addr(x);
67346 }
67347
67348@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void
67349
67350 if (!pgd_none(*pgd)) {
67351 pud_t *pud = pud_offset(pgd, addr);
67352+#ifdef CONFIG_X86
67353+ if (!pud_large(*pud))
67354+#endif
67355 if (!pud_none(*pud)) {
67356 pmd_t *pmd = pmd_offset(pud, addr);
67357+#ifdef CONFIG_X86
67358+ if (!pmd_large(*pmd))
67359+#endif
67360 if (!pmd_none(*pmd)) {
67361 pte_t *ptep, pte;
67362
67363@@ -1297,6 +1337,16 @@ static struct vm_struct *__get_vm_area_n
67364 struct vm_struct *area;
67365
67366 BUG_ON(in_interrupt());
67367+
67368+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
67369+ if (flags & VM_KERNEXEC) {
67370+ if (start != VMALLOC_START || end != VMALLOC_END)
67371+ return NULL;
67372+ start = (unsigned long)MODULES_EXEC_VADDR;
67373+ end = (unsigned long)MODULES_EXEC_END;
67374+ }
67375+#endif
67376+
67377 if (flags & VM_IOREMAP) {
67378 int bit = fls(size);
67379
67380@@ -1515,6 +1565,11 @@ void *vmap(struct page **pages, unsigned
67381 if (count > totalram_pages)
67382 return NULL;
67383
67384+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
67385+ if (!(pgprot_val(prot) & _PAGE_NX))
67386+ flags |= VM_KERNEXEC;
67387+#endif
67388+
67389 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
67390 __builtin_return_address(0));
67391 if (!area)
67392@@ -1616,6 +1671,13 @@ void *__vmalloc_node_range(unsigned long
67393 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
67394 return NULL;
67395
67396+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
67397+ if (!(pgprot_val(prot) & _PAGE_NX))
67398+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
67399+ node, gfp_mask, caller);
67400+ else
67401+#endif
67402+
67403 area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
67404 gfp_mask, caller);
67405
67406@@ -1655,6 +1717,7 @@ static void *__vmalloc_node(unsigned lon
67407 gfp_mask, prot, node, caller);
67408 }
67409
67410+#undef __vmalloc
67411 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
67412 {
67413 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
67414@@ -1678,6 +1741,7 @@ static inline void *__vmalloc_node_flags
67415 * For tight control over page level allocator and protection flags
67416 * use __vmalloc() instead.
67417 */
67418+#undef vmalloc
67419 void *vmalloc(unsigned long size)
67420 {
67421 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
67422@@ -1694,6 +1758,7 @@ EXPORT_SYMBOL(vmalloc);
67423 * For tight control over page level allocator and protection flags
67424 * use __vmalloc() instead.
67425 */
67426+#undef vzalloc
67427 void *vzalloc(unsigned long size)
67428 {
67429 return __vmalloc_node_flags(size, -1,
67430@@ -1708,6 +1773,7 @@ EXPORT_SYMBOL(vzalloc);
67431 * The resulting memory area is zeroed so it can be mapped to userspace
67432 * without leaking data.
67433 */
67434+#undef vmalloc_user
67435 void *vmalloc_user(unsigned long size)
67436 {
67437 struct vm_struct *area;
67438@@ -1735,6 +1801,7 @@ EXPORT_SYMBOL(vmalloc_user);
67439 * For tight control over page level allocator and protection flags
67440 * use __vmalloc() instead.
67441 */
67442+#undef vmalloc_node
67443 void *vmalloc_node(unsigned long size, int node)
67444 {
67445 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
67446@@ -1754,6 +1821,7 @@ EXPORT_SYMBOL(vmalloc_node);
67447 * For tight control over page level allocator and protection flags
67448 * use __vmalloc_node() instead.
67449 */
67450+#undef vzalloc_node
67451 void *vzalloc_node(unsigned long size, int node)
67452 {
67453 return __vmalloc_node_flags(size, node,
67454@@ -1776,10 +1844,10 @@ EXPORT_SYMBOL(vzalloc_node);
67455 * For tight control over page level allocator and protection flags
67456 * use __vmalloc() instead.
67457 */
67458-
67459+#undef vmalloc_exec
67460 void *vmalloc_exec(unsigned long size)
67461 {
67462- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
67463+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
67464 -1, __builtin_return_address(0));
67465 }
67466
67467@@ -1798,6 +1866,7 @@ void *vmalloc_exec(unsigned long size)
67468 * Allocate enough 32bit PA addressable pages to cover @size from the
67469 * page level allocator and map them into contiguous kernel virtual space.
67470 */
67471+#undef vmalloc_32
67472 void *vmalloc_32(unsigned long size)
67473 {
67474 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
67475@@ -1812,6 +1881,7 @@ EXPORT_SYMBOL(vmalloc_32);
67476 * The resulting memory area is 32bit addressable and zeroed so it can be
67477 * mapped to userspace without leaking data.
67478 */
67479+#undef vmalloc_32_user
67480 void *vmalloc_32_user(unsigned long size)
67481 {
67482 struct vm_struct *area;
67483@@ -2074,6 +2144,8 @@ int remap_vmalloc_range(struct vm_area_s
67484 unsigned long uaddr = vma->vm_start;
67485 unsigned long usize = vma->vm_end - vma->vm_start;
67486
67487+ BUG_ON(vma->vm_mirror);
67488+
67489 if ((PAGE_SIZE-1) & (unsigned long)addr)
67490 return -EINVAL;
67491
67492diff -urNp linux-3.0.4/mm/vmstat.c linux-3.0.4/mm/vmstat.c
67493--- linux-3.0.4/mm/vmstat.c 2011-07-21 22:17:23.000000000 -0400
67494+++ linux-3.0.4/mm/vmstat.c 2011-08-23 21:48:14.000000000 -0400
67495@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
67496 *
67497 * vm_stat contains the global counters
67498 */
67499-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
67500+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
67501 EXPORT_SYMBOL(vm_stat);
67502
67503 #ifdef CONFIG_SMP
67504@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
67505 v = p->vm_stat_diff[i];
67506 p->vm_stat_diff[i] = 0;
67507 local_irq_restore(flags);
67508- atomic_long_add(v, &zone->vm_stat[i]);
67509+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
67510 global_diff[i] += v;
67511 #ifdef CONFIG_NUMA
67512 /* 3 seconds idle till flush */
67513@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
67514
67515 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
67516 if (global_diff[i])
67517- atomic_long_add(global_diff[i], &vm_stat[i]);
67518+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
67519 }
67520
67521 #endif
67522@@ -1207,10 +1207,20 @@ static int __init setup_vmstat(void)
67523 start_cpu_timer(cpu);
67524 #endif
67525 #ifdef CONFIG_PROC_FS
67526- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
67527- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
67528- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
67529- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
67530+ {
67531+ mode_t gr_mode = S_IRUGO;
67532+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67533+ gr_mode = S_IRUSR;
67534+#endif
67535+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
67536+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
67537+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67538+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
67539+#else
67540+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
67541+#endif
67542+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
67543+ }
67544 #endif
67545 return 0;
67546 }
67547diff -urNp linux-3.0.4/net/8021q/vlan.c linux-3.0.4/net/8021q/vlan.c
67548--- linux-3.0.4/net/8021q/vlan.c 2011-07-21 22:17:23.000000000 -0400
67549+++ linux-3.0.4/net/8021q/vlan.c 2011-08-23 21:47:56.000000000 -0400
67550@@ -591,8 +591,7 @@ static int vlan_ioctl_handler(struct net
67551 err = -EPERM;
67552 if (!capable(CAP_NET_ADMIN))
67553 break;
67554- if ((args.u.name_type >= 0) &&
67555- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
67556+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
67557 struct vlan_net *vn;
67558
67559 vn = net_generic(net, vlan_net_id);
67560diff -urNp linux-3.0.4/net/atm/atm_misc.c linux-3.0.4/net/atm/atm_misc.c
67561--- linux-3.0.4/net/atm/atm_misc.c 2011-07-21 22:17:23.000000000 -0400
67562+++ linux-3.0.4/net/atm/atm_misc.c 2011-08-23 21:47:56.000000000 -0400
67563@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
67564 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
67565 return 1;
67566 atm_return(vcc, truesize);
67567- atomic_inc(&vcc->stats->rx_drop);
67568+ atomic_inc_unchecked(&vcc->stats->rx_drop);
67569 return 0;
67570 }
67571 EXPORT_SYMBOL(atm_charge);
67572@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
67573 }
67574 }
67575 atm_return(vcc, guess);
67576- atomic_inc(&vcc->stats->rx_drop);
67577+ atomic_inc_unchecked(&vcc->stats->rx_drop);
67578 return NULL;
67579 }
67580 EXPORT_SYMBOL(atm_alloc_charge);
67581@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
67582
67583 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
67584 {
67585-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
67586+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
67587 __SONET_ITEMS
67588 #undef __HANDLE_ITEM
67589 }
67590@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
67591
67592 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
67593 {
67594-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
67595+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
67596 __SONET_ITEMS
67597 #undef __HANDLE_ITEM
67598 }
67599diff -urNp linux-3.0.4/net/atm/lec.h linux-3.0.4/net/atm/lec.h
67600--- linux-3.0.4/net/atm/lec.h 2011-07-21 22:17:23.000000000 -0400
67601+++ linux-3.0.4/net/atm/lec.h 2011-08-23 21:47:56.000000000 -0400
67602@@ -48,7 +48,7 @@ struct lane2_ops {
67603 const u8 *tlvs, u32 sizeoftlvs);
67604 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
67605 const u8 *tlvs, u32 sizeoftlvs);
67606-};
67607+} __no_const;
67608
67609 /*
67610 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
67611diff -urNp linux-3.0.4/net/atm/mpc.h linux-3.0.4/net/atm/mpc.h
67612--- linux-3.0.4/net/atm/mpc.h 2011-07-21 22:17:23.000000000 -0400
67613+++ linux-3.0.4/net/atm/mpc.h 2011-08-23 21:47:56.000000000 -0400
67614@@ -33,7 +33,7 @@ struct mpoa_client {
67615 struct mpc_parameters parameters; /* parameters for this client */
67616
67617 const struct net_device_ops *old_ops;
67618- struct net_device_ops new_ops;
67619+ net_device_ops_no_const new_ops;
67620 };
67621
67622
67623diff -urNp linux-3.0.4/net/atm/mpoa_caches.c linux-3.0.4/net/atm/mpoa_caches.c
67624--- linux-3.0.4/net/atm/mpoa_caches.c 2011-07-21 22:17:23.000000000 -0400
67625+++ linux-3.0.4/net/atm/mpoa_caches.c 2011-08-23 21:48:14.000000000 -0400
67626@@ -255,6 +255,8 @@ static void check_resolving_entries(stru
67627 struct timeval now;
67628 struct k_message msg;
67629
67630+ pax_track_stack();
67631+
67632 do_gettimeofday(&now);
67633
67634 read_lock_bh(&client->ingress_lock);
67635diff -urNp linux-3.0.4/net/atm/proc.c linux-3.0.4/net/atm/proc.c
67636--- linux-3.0.4/net/atm/proc.c 2011-07-21 22:17:23.000000000 -0400
67637+++ linux-3.0.4/net/atm/proc.c 2011-08-23 21:47:56.000000000 -0400
67638@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *s
67639 const struct k_atm_aal_stats *stats)
67640 {
67641 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
67642- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
67643- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
67644- atomic_read(&stats->rx_drop));
67645+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
67646+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
67647+ atomic_read_unchecked(&stats->rx_drop));
67648 }
67649
67650 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
67651diff -urNp linux-3.0.4/net/atm/resources.c linux-3.0.4/net/atm/resources.c
67652--- linux-3.0.4/net/atm/resources.c 2011-07-21 22:17:23.000000000 -0400
67653+++ linux-3.0.4/net/atm/resources.c 2011-08-23 21:47:56.000000000 -0400
67654@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
67655 static void copy_aal_stats(struct k_atm_aal_stats *from,
67656 struct atm_aal_stats *to)
67657 {
67658-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
67659+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
67660 __AAL_STAT_ITEMS
67661 #undef __HANDLE_ITEM
67662 }
67663@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_
67664 static void subtract_aal_stats(struct k_atm_aal_stats *from,
67665 struct atm_aal_stats *to)
67666 {
67667-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
67668+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
67669 __AAL_STAT_ITEMS
67670 #undef __HANDLE_ITEM
67671 }
67672diff -urNp linux-3.0.4/net/batman-adv/hard-interface.c linux-3.0.4/net/batman-adv/hard-interface.c
67673--- linux-3.0.4/net/batman-adv/hard-interface.c 2011-07-21 22:17:23.000000000 -0400
67674+++ linux-3.0.4/net/batman-adv/hard-interface.c 2011-08-23 21:47:56.000000000 -0400
67675@@ -351,8 +351,8 @@ int hardif_enable_interface(struct hard_
67676 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
67677 dev_add_pack(&hard_iface->batman_adv_ptype);
67678
67679- atomic_set(&hard_iface->seqno, 1);
67680- atomic_set(&hard_iface->frag_seqno, 1);
67681+ atomic_set_unchecked(&hard_iface->seqno, 1);
67682+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
67683 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
67684 hard_iface->net_dev->name);
67685
67686diff -urNp linux-3.0.4/net/batman-adv/routing.c linux-3.0.4/net/batman-adv/routing.c
67687--- linux-3.0.4/net/batman-adv/routing.c 2011-07-21 22:17:23.000000000 -0400
67688+++ linux-3.0.4/net/batman-adv/routing.c 2011-08-23 21:47:56.000000000 -0400
67689@@ -627,7 +627,7 @@ void receive_bat_packet(struct ethhdr *e
67690 return;
67691
67692 /* could be changed by schedule_own_packet() */
67693- if_incoming_seqno = atomic_read(&if_incoming->seqno);
67694+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
67695
67696 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
67697
67698diff -urNp linux-3.0.4/net/batman-adv/send.c linux-3.0.4/net/batman-adv/send.c
67699--- linux-3.0.4/net/batman-adv/send.c 2011-07-21 22:17:23.000000000 -0400
67700+++ linux-3.0.4/net/batman-adv/send.c 2011-08-23 21:47:56.000000000 -0400
67701@@ -279,7 +279,7 @@ void schedule_own_packet(struct hard_ifa
67702
67703 /* change sequence number to network order */
67704 batman_packet->seqno =
67705- htonl((uint32_t)atomic_read(&hard_iface->seqno));
67706+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
67707
67708 if (vis_server == VIS_TYPE_SERVER_SYNC)
67709 batman_packet->flags |= VIS_SERVER;
67710@@ -293,7 +293,7 @@ void schedule_own_packet(struct hard_ifa
67711 else
67712 batman_packet->gw_flags = 0;
67713
67714- atomic_inc(&hard_iface->seqno);
67715+ atomic_inc_unchecked(&hard_iface->seqno);
67716
67717 slide_own_bcast_window(hard_iface);
67718 send_time = own_send_time(bat_priv);
67719diff -urNp linux-3.0.4/net/batman-adv/soft-interface.c linux-3.0.4/net/batman-adv/soft-interface.c
67720--- linux-3.0.4/net/batman-adv/soft-interface.c 2011-07-21 22:17:23.000000000 -0400
67721+++ linux-3.0.4/net/batman-adv/soft-interface.c 2011-08-23 21:47:56.000000000 -0400
67722@@ -628,7 +628,7 @@ int interface_tx(struct sk_buff *skb, st
67723
67724 /* set broadcast sequence number */
67725 bcast_packet->seqno =
67726- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
67727+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
67728
67729 add_bcast_packet_to_list(bat_priv, skb);
67730
67731@@ -830,7 +830,7 @@ struct net_device *softif_create(char *n
67732 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
67733
67734 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
67735- atomic_set(&bat_priv->bcast_seqno, 1);
67736+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
67737 atomic_set(&bat_priv->tt_local_changed, 0);
67738
67739 bat_priv->primary_if = NULL;
67740diff -urNp linux-3.0.4/net/batman-adv/types.h linux-3.0.4/net/batman-adv/types.h
67741--- linux-3.0.4/net/batman-adv/types.h 2011-07-21 22:17:23.000000000 -0400
67742+++ linux-3.0.4/net/batman-adv/types.h 2011-08-23 21:47:56.000000000 -0400
67743@@ -38,8 +38,8 @@ struct hard_iface {
67744 int16_t if_num;
67745 char if_status;
67746 struct net_device *net_dev;
67747- atomic_t seqno;
67748- atomic_t frag_seqno;
67749+ atomic_unchecked_t seqno;
67750+ atomic_unchecked_t frag_seqno;
67751 unsigned char *packet_buff;
67752 int packet_len;
67753 struct kobject *hardif_obj;
67754@@ -142,7 +142,7 @@ struct bat_priv {
67755 atomic_t orig_interval; /* uint */
67756 atomic_t hop_penalty; /* uint */
67757 atomic_t log_level; /* uint */
67758- atomic_t bcast_seqno;
67759+ atomic_unchecked_t bcast_seqno;
67760 atomic_t bcast_queue_left;
67761 atomic_t batman_queue_left;
67762 char num_ifaces;
67763diff -urNp linux-3.0.4/net/batman-adv/unicast.c linux-3.0.4/net/batman-adv/unicast.c
67764--- linux-3.0.4/net/batman-adv/unicast.c 2011-07-21 22:17:23.000000000 -0400
67765+++ linux-3.0.4/net/batman-adv/unicast.c 2011-08-23 21:47:56.000000000 -0400
67766@@ -265,7 +265,7 @@ int frag_send_skb(struct sk_buff *skb, s
67767 frag1->flags = UNI_FRAG_HEAD | large_tail;
67768 frag2->flags = large_tail;
67769
67770- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
67771+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
67772 frag1->seqno = htons(seqno - 1);
67773 frag2->seqno = htons(seqno);
67774
67775diff -urNp linux-3.0.4/net/bridge/br_multicast.c linux-3.0.4/net/bridge/br_multicast.c
67776--- linux-3.0.4/net/bridge/br_multicast.c 2011-07-21 22:17:23.000000000 -0400
67777+++ linux-3.0.4/net/bridge/br_multicast.c 2011-08-23 21:47:56.000000000 -0400
67778@@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct
67779 nexthdr = ip6h->nexthdr;
67780 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
67781
67782- if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
67783+ if (nexthdr != IPPROTO_ICMPV6)
67784 return 0;
67785
67786 /* Okay, we found ICMPv6 header */
67787diff -urNp linux-3.0.4/net/bridge/netfilter/ebtables.c linux-3.0.4/net/bridge/netfilter/ebtables.c
67788--- linux-3.0.4/net/bridge/netfilter/ebtables.c 2011-07-21 22:17:23.000000000 -0400
67789+++ linux-3.0.4/net/bridge/netfilter/ebtables.c 2011-08-23 21:48:14.000000000 -0400
67790@@ -1512,7 +1512,7 @@ static int do_ebt_get_ctl(struct sock *s
67791 tmp.valid_hooks = t->table->valid_hooks;
67792 }
67793 mutex_unlock(&ebt_mutex);
67794- if (copy_to_user(user, &tmp, *len) != 0){
67795+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
67796 BUGPRINT("c2u Didn't work\n");
67797 ret = -EFAULT;
67798 break;
67799@@ -1780,6 +1780,8 @@ static int compat_copy_everything_to_use
67800 int ret;
67801 void __user *pos;
67802
67803+ pax_track_stack();
67804+
67805 memset(&tinfo, 0, sizeof(tinfo));
67806
67807 if (cmd == EBT_SO_GET_ENTRIES) {
67808diff -urNp linux-3.0.4/net/caif/caif_socket.c linux-3.0.4/net/caif/caif_socket.c
67809--- linux-3.0.4/net/caif/caif_socket.c 2011-07-21 22:17:23.000000000 -0400
67810+++ linux-3.0.4/net/caif/caif_socket.c 2011-08-23 21:47:56.000000000 -0400
67811@@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
67812 #ifdef CONFIG_DEBUG_FS
67813 struct debug_fs_counter {
67814 atomic_t caif_nr_socks;
67815- atomic_t caif_sock_create;
67816- atomic_t num_connect_req;
67817- atomic_t num_connect_resp;
67818- atomic_t num_connect_fail_resp;
67819- atomic_t num_disconnect;
67820- atomic_t num_remote_shutdown_ind;
67821- atomic_t num_tx_flow_off_ind;
67822- atomic_t num_tx_flow_on_ind;
67823- atomic_t num_rx_flow_off;
67824- atomic_t num_rx_flow_on;
67825+ atomic_unchecked_t caif_sock_create;
67826+ atomic_unchecked_t num_connect_req;
67827+ atomic_unchecked_t num_connect_resp;
67828+ atomic_unchecked_t num_connect_fail_resp;
67829+ atomic_unchecked_t num_disconnect;
67830+ atomic_unchecked_t num_remote_shutdown_ind;
67831+ atomic_unchecked_t num_tx_flow_off_ind;
67832+ atomic_unchecked_t num_tx_flow_on_ind;
67833+ atomic_unchecked_t num_rx_flow_off;
67834+ atomic_unchecked_t num_rx_flow_on;
67835 };
67836 static struct debug_fs_counter cnt;
67837 #define dbfs_atomic_inc(v) atomic_inc_return(v)
67838+#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
67839 #define dbfs_atomic_dec(v) atomic_dec_return(v)
67840 #else
67841 #define dbfs_atomic_inc(v) 0
67842@@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct soc
67843 atomic_read(&cf_sk->sk.sk_rmem_alloc),
67844 sk_rcvbuf_lowwater(cf_sk));
67845 set_rx_flow_off(cf_sk);
67846- dbfs_atomic_inc(&cnt.num_rx_flow_off);
67847+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
67848 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
67849 }
67850
67851@@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct soc
67852 set_rx_flow_off(cf_sk);
67853 if (net_ratelimit())
67854 pr_debug("sending flow OFF due to rmem_schedule\n");
67855- dbfs_atomic_inc(&cnt.num_rx_flow_off);
67856+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
67857 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
67858 }
67859 skb->dev = NULL;
67860@@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer
67861 switch (flow) {
67862 case CAIF_CTRLCMD_FLOW_ON_IND:
67863 /* OK from modem to start sending again */
67864- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
67865+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
67866 set_tx_flow_on(cf_sk);
67867 cf_sk->sk.sk_state_change(&cf_sk->sk);
67868 break;
67869
67870 case CAIF_CTRLCMD_FLOW_OFF_IND:
67871 /* Modem asks us to shut up */
67872- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
67873+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
67874 set_tx_flow_off(cf_sk);
67875 cf_sk->sk.sk_state_change(&cf_sk->sk);
67876 break;
67877@@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer
67878 /* We're now connected */
67879 caif_client_register_refcnt(&cf_sk->layer,
67880 cfsk_hold, cfsk_put);
67881- dbfs_atomic_inc(&cnt.num_connect_resp);
67882+ dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
67883 cf_sk->sk.sk_state = CAIF_CONNECTED;
67884 set_tx_flow_on(cf_sk);
67885 cf_sk->sk.sk_state_change(&cf_sk->sk);
67886@@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer
67887
67888 case CAIF_CTRLCMD_INIT_FAIL_RSP:
67889 /* Connect request failed */
67890- dbfs_atomic_inc(&cnt.num_connect_fail_resp);
67891+ dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
67892 cf_sk->sk.sk_err = ECONNREFUSED;
67893 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
67894 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
67895@@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer
67896
67897 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
67898 /* Modem has closed this connection, or device is down. */
67899- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
67900+ dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
67901 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
67902 cf_sk->sk.sk_err = ECONNRESET;
67903 set_rx_flow_on(cf_sk);
67904@@ -297,7 +298,7 @@ static void caif_check_flow_release(stru
67905 return;
67906
67907 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
67908- dbfs_atomic_inc(&cnt.num_rx_flow_on);
67909+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
67910 set_rx_flow_on(cf_sk);
67911 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
67912 }
67913@@ -854,7 +855,7 @@ static int caif_connect(struct socket *s
67914 /*ifindex = id of the interface.*/
67915 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
67916
67917- dbfs_atomic_inc(&cnt.num_connect_req);
67918+ dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
67919 cf_sk->layer.receive = caif_sktrecv_cb;
67920
67921 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
67922@@ -943,7 +944,7 @@ static int caif_release(struct socket *s
67923 spin_unlock_bh(&sk->sk_receive_queue.lock);
67924 sock->sk = NULL;
67925
67926- dbfs_atomic_inc(&cnt.num_disconnect);
67927+ dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
67928
67929 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
67930 if (cf_sk->debugfs_socket_dir != NULL)
67931@@ -1122,7 +1123,7 @@ static int caif_create(struct net *net,
67932 cf_sk->conn_req.protocol = protocol;
67933 /* Increase the number of sockets created. */
67934 dbfs_atomic_inc(&cnt.caif_nr_socks);
67935- num = dbfs_atomic_inc(&cnt.caif_sock_create);
67936+ num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
67937 #ifdef CONFIG_DEBUG_FS
67938 if (!IS_ERR(debugfsdir)) {
67939
67940diff -urNp linux-3.0.4/net/caif/cfctrl.c linux-3.0.4/net/caif/cfctrl.c
67941--- linux-3.0.4/net/caif/cfctrl.c 2011-07-21 22:17:23.000000000 -0400
67942+++ linux-3.0.4/net/caif/cfctrl.c 2011-08-23 21:48:14.000000000 -0400
67943@@ -9,6 +9,7 @@
67944 #include <linux/stddef.h>
67945 #include <linux/spinlock.h>
67946 #include <linux/slab.h>
67947+#include <linux/sched.h>
67948 #include <net/caif/caif_layer.h>
67949 #include <net/caif/cfpkt.h>
67950 #include <net/caif/cfctrl.h>
67951@@ -45,8 +46,8 @@ struct cflayer *cfctrl_create(void)
67952 dev_info.id = 0xff;
67953 memset(this, 0, sizeof(*this));
67954 cfsrvl_init(&this->serv, 0, &dev_info, false);
67955- atomic_set(&this->req_seq_no, 1);
67956- atomic_set(&this->rsp_seq_no, 1);
67957+ atomic_set_unchecked(&this->req_seq_no, 1);
67958+ atomic_set_unchecked(&this->rsp_seq_no, 1);
67959 this->serv.layer.receive = cfctrl_recv;
67960 sprintf(this->serv.layer.name, "ctrl");
67961 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
67962@@ -132,8 +133,8 @@ static void cfctrl_insert_req(struct cfc
67963 struct cfctrl_request_info *req)
67964 {
67965 spin_lock_bh(&ctrl->info_list_lock);
67966- atomic_inc(&ctrl->req_seq_no);
67967- req->sequence_no = atomic_read(&ctrl->req_seq_no);
67968+ atomic_inc_unchecked(&ctrl->req_seq_no);
67969+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
67970 list_add_tail(&req->list, &ctrl->list);
67971 spin_unlock_bh(&ctrl->info_list_lock);
67972 }
67973@@ -151,7 +152,7 @@ static struct cfctrl_request_info *cfctr
67974 if (p != first)
67975 pr_warn("Requests are not received in order\n");
67976
67977- atomic_set(&ctrl->rsp_seq_no,
67978+ atomic_set_unchecked(&ctrl->rsp_seq_no,
67979 p->sequence_no);
67980 list_del(&p->list);
67981 goto out;
67982@@ -364,6 +365,7 @@ static int cfctrl_recv(struct cflayer *l
67983 struct cfctrl *cfctrl = container_obj(layer);
67984 struct cfctrl_request_info rsp, *req;
67985
67986+ pax_track_stack();
67987
67988 cfpkt_extr_head(pkt, &cmdrsp, 1);
67989 cmd = cmdrsp & CFCTRL_CMD_MASK;
67990diff -urNp linux-3.0.4/net/core/datagram.c linux-3.0.4/net/core/datagram.c
67991--- linux-3.0.4/net/core/datagram.c 2011-07-21 22:17:23.000000000 -0400
67992+++ linux-3.0.4/net/core/datagram.c 2011-08-23 21:47:56.000000000 -0400
67993@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, s
67994 }
67995
67996 kfree_skb(skb);
67997- atomic_inc(&sk->sk_drops);
67998+ atomic_inc_unchecked(&sk->sk_drops);
67999 sk_mem_reclaim_partial(sk);
68000
68001 return err;
68002diff -urNp linux-3.0.4/net/core/dev.c linux-3.0.4/net/core/dev.c
68003--- linux-3.0.4/net/core/dev.c 2011-07-21 22:17:23.000000000 -0400
68004+++ linux-3.0.4/net/core/dev.c 2011-08-23 21:48:14.000000000 -0400
68005@@ -1125,10 +1125,14 @@ void dev_load(struct net *net, const cha
68006 if (no_module && capable(CAP_NET_ADMIN))
68007 no_module = request_module("netdev-%s", name);
68008 if (no_module && capable(CAP_SYS_MODULE)) {
68009+#ifdef CONFIG_GRKERNSEC_MODHARDEN
68010+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
68011+#else
68012 if (!request_module("%s", name))
68013 pr_err("Loading kernel module for a network device "
68014 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
68015 "instead\n", name);
68016+#endif
68017 }
68018 }
68019 EXPORT_SYMBOL(dev_load);
68020@@ -1959,7 +1963,7 @@ static int illegal_highdma(struct net_de
68021
68022 struct dev_gso_cb {
68023 void (*destructor)(struct sk_buff *skb);
68024-};
68025+} __no_const;
68026
68027 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
68028
68029@@ -2912,7 +2916,7 @@ int netif_rx_ni(struct sk_buff *skb)
68030 }
68031 EXPORT_SYMBOL(netif_rx_ni);
68032
68033-static void net_tx_action(struct softirq_action *h)
68034+static void net_tx_action(void)
68035 {
68036 struct softnet_data *sd = &__get_cpu_var(softnet_data);
68037
68038@@ -3761,7 +3765,7 @@ void netif_napi_del(struct napi_struct *
68039 }
68040 EXPORT_SYMBOL(netif_napi_del);
68041
68042-static void net_rx_action(struct softirq_action *h)
68043+static void net_rx_action(void)
68044 {
68045 struct softnet_data *sd = &__get_cpu_var(softnet_data);
68046 unsigned long time_limit = jiffies + 2;
68047diff -urNp linux-3.0.4/net/core/flow.c linux-3.0.4/net/core/flow.c
68048--- linux-3.0.4/net/core/flow.c 2011-07-21 22:17:23.000000000 -0400
68049+++ linux-3.0.4/net/core/flow.c 2011-08-23 21:47:56.000000000 -0400
68050@@ -60,7 +60,7 @@ struct flow_cache {
68051 struct timer_list rnd_timer;
68052 };
68053
68054-atomic_t flow_cache_genid = ATOMIC_INIT(0);
68055+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
68056 EXPORT_SYMBOL(flow_cache_genid);
68057 static struct flow_cache flow_cache_global;
68058 static struct kmem_cache *flow_cachep __read_mostly;
68059@@ -85,7 +85,7 @@ static void flow_cache_new_hashrnd(unsig
68060
68061 static int flow_entry_valid(struct flow_cache_entry *fle)
68062 {
68063- if (atomic_read(&flow_cache_genid) != fle->genid)
68064+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
68065 return 0;
68066 if (fle->object && !fle->object->ops->check(fle->object))
68067 return 0;
68068@@ -253,7 +253,7 @@ flow_cache_lookup(struct net *net, const
68069 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
68070 fcp->hash_count++;
68071 }
68072- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
68073+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
68074 flo = fle->object;
68075 if (!flo)
68076 goto ret_object;
68077@@ -274,7 +274,7 @@ nocache:
68078 }
68079 flo = resolver(net, key, family, dir, flo, ctx);
68080 if (fle) {
68081- fle->genid = atomic_read(&flow_cache_genid);
68082+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
68083 if (!IS_ERR(flo))
68084 fle->object = flo;
68085 else
68086diff -urNp linux-3.0.4/net/core/rtnetlink.c linux-3.0.4/net/core/rtnetlink.c
68087--- linux-3.0.4/net/core/rtnetlink.c 2011-07-21 22:17:23.000000000 -0400
68088+++ linux-3.0.4/net/core/rtnetlink.c 2011-08-23 21:47:56.000000000 -0400
68089@@ -56,7 +56,7 @@
68090 struct rtnl_link {
68091 rtnl_doit_func doit;
68092 rtnl_dumpit_func dumpit;
68093-};
68094+} __no_const;
68095
68096 static DEFINE_MUTEX(rtnl_mutex);
68097
68098diff -urNp linux-3.0.4/net/core/skbuff.c linux-3.0.4/net/core/skbuff.c
68099--- linux-3.0.4/net/core/skbuff.c 2011-07-21 22:17:23.000000000 -0400
68100+++ linux-3.0.4/net/core/skbuff.c 2011-08-23 21:48:14.000000000 -0400
68101@@ -1543,6 +1543,8 @@ int skb_splice_bits(struct sk_buff *skb,
68102 struct sock *sk = skb->sk;
68103 int ret = 0;
68104
68105+ pax_track_stack();
68106+
68107 if (splice_grow_spd(pipe, &spd))
68108 return -ENOMEM;
68109
68110diff -urNp linux-3.0.4/net/core/sock.c linux-3.0.4/net/core/sock.c
68111--- linux-3.0.4/net/core/sock.c 2011-07-21 22:17:23.000000000 -0400
68112+++ linux-3.0.4/net/core/sock.c 2011-08-23 21:48:14.000000000 -0400
68113@@ -291,7 +291,7 @@ int sock_queue_rcv_skb(struct sock *sk,
68114 */
68115 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
68116 (unsigned)sk->sk_rcvbuf) {
68117- atomic_inc(&sk->sk_drops);
68118+ atomic_inc_unchecked(&sk->sk_drops);
68119 return -ENOMEM;
68120 }
68121
68122@@ -300,7 +300,7 @@ int sock_queue_rcv_skb(struct sock *sk,
68123 return err;
68124
68125 if (!sk_rmem_schedule(sk, skb->truesize)) {
68126- atomic_inc(&sk->sk_drops);
68127+ atomic_inc_unchecked(&sk->sk_drops);
68128 return -ENOBUFS;
68129 }
68130
68131@@ -320,7 +320,7 @@ int sock_queue_rcv_skb(struct sock *sk,
68132 skb_dst_force(skb);
68133
68134 spin_lock_irqsave(&list->lock, flags);
68135- skb->dropcount = atomic_read(&sk->sk_drops);
68136+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
68137 __skb_queue_tail(list, skb);
68138 spin_unlock_irqrestore(&list->lock, flags);
68139
68140@@ -340,7 +340,7 @@ int sk_receive_skb(struct sock *sk, stru
68141 skb->dev = NULL;
68142
68143 if (sk_rcvqueues_full(sk, skb)) {
68144- atomic_inc(&sk->sk_drops);
68145+ atomic_inc_unchecked(&sk->sk_drops);
68146 goto discard_and_relse;
68147 }
68148 if (nested)
68149@@ -358,7 +358,7 @@ int sk_receive_skb(struct sock *sk, stru
68150 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
68151 } else if (sk_add_backlog(sk, skb)) {
68152 bh_unlock_sock(sk);
68153- atomic_inc(&sk->sk_drops);
68154+ atomic_inc_unchecked(&sk->sk_drops);
68155 goto discard_and_relse;
68156 }
68157
68158@@ -921,7 +921,7 @@ int sock_getsockopt(struct socket *sock,
68159 if (len > sizeof(peercred))
68160 len = sizeof(peercred);
68161 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
68162- if (copy_to_user(optval, &peercred, len))
68163+ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
68164 return -EFAULT;
68165 goto lenout;
68166 }
68167@@ -934,7 +934,7 @@ int sock_getsockopt(struct socket *sock,
68168 return -ENOTCONN;
68169 if (lv < len)
68170 return -EINVAL;
68171- if (copy_to_user(optval, address, len))
68172+ if (len > sizeof(address) || copy_to_user(optval, address, len))
68173 return -EFAULT;
68174 goto lenout;
68175 }
68176@@ -967,7 +967,7 @@ int sock_getsockopt(struct socket *sock,
68177
68178 if (len > lv)
68179 len = lv;
68180- if (copy_to_user(optval, &v, len))
68181+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
68182 return -EFAULT;
68183 lenout:
68184 if (put_user(len, optlen))
68185@@ -2023,7 +2023,7 @@ void sock_init_data(struct socket *sock,
68186 */
68187 smp_wmb();
68188 atomic_set(&sk->sk_refcnt, 1);
68189- atomic_set(&sk->sk_drops, 0);
68190+ atomic_set_unchecked(&sk->sk_drops, 0);
68191 }
68192 EXPORT_SYMBOL(sock_init_data);
68193
68194diff -urNp linux-3.0.4/net/decnet/sysctl_net_decnet.c linux-3.0.4/net/decnet/sysctl_net_decnet.c
68195--- linux-3.0.4/net/decnet/sysctl_net_decnet.c 2011-07-21 22:17:23.000000000 -0400
68196+++ linux-3.0.4/net/decnet/sysctl_net_decnet.c 2011-08-23 21:47:56.000000000 -0400
68197@@ -173,7 +173,7 @@ static int dn_node_address_handler(ctl_t
68198
68199 if (len > *lenp) len = *lenp;
68200
68201- if (copy_to_user(buffer, addr, len))
68202+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
68203 return -EFAULT;
68204
68205 *lenp = len;
68206@@ -236,7 +236,7 @@ static int dn_def_dev_handler(ctl_table
68207
68208 if (len > *lenp) len = *lenp;
68209
68210- if (copy_to_user(buffer, devname, len))
68211+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
68212 return -EFAULT;
68213
68214 *lenp = len;
68215diff -urNp linux-3.0.4/net/econet/Kconfig linux-3.0.4/net/econet/Kconfig
68216--- linux-3.0.4/net/econet/Kconfig 2011-07-21 22:17:23.000000000 -0400
68217+++ linux-3.0.4/net/econet/Kconfig 2011-08-23 21:48:14.000000000 -0400
68218@@ -4,7 +4,7 @@
68219
68220 config ECONET
68221 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
68222- depends on EXPERIMENTAL && INET
68223+ depends on EXPERIMENTAL && INET && BROKEN
68224 ---help---
68225 Econet is a fairly old and slow networking protocol mainly used by
68226 Acorn computers to access file and print servers. It uses native
68227diff -urNp linux-3.0.4/net/ipv4/fib_frontend.c linux-3.0.4/net/ipv4/fib_frontend.c
68228--- linux-3.0.4/net/ipv4/fib_frontend.c 2011-07-21 22:17:23.000000000 -0400
68229+++ linux-3.0.4/net/ipv4/fib_frontend.c 2011-08-23 21:47:56.000000000 -0400
68230@@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct not
68231 #ifdef CONFIG_IP_ROUTE_MULTIPATH
68232 fib_sync_up(dev);
68233 #endif
68234- atomic_inc(&net->ipv4.dev_addr_genid);
68235+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
68236 rt_cache_flush(dev_net(dev), -1);
68237 break;
68238 case NETDEV_DOWN:
68239 fib_del_ifaddr(ifa, NULL);
68240- atomic_inc(&net->ipv4.dev_addr_genid);
68241+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
68242 if (ifa->ifa_dev->ifa_list == NULL) {
68243 /* Last address was deleted from this interface.
68244 * Disable IP.
68245@@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notif
68246 #ifdef CONFIG_IP_ROUTE_MULTIPATH
68247 fib_sync_up(dev);
68248 #endif
68249- atomic_inc(&net->ipv4.dev_addr_genid);
68250+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
68251 rt_cache_flush(dev_net(dev), -1);
68252 break;
68253 case NETDEV_DOWN:
68254diff -urNp linux-3.0.4/net/ipv4/fib_semantics.c linux-3.0.4/net/ipv4/fib_semantics.c
68255--- linux-3.0.4/net/ipv4/fib_semantics.c 2011-07-21 22:17:23.000000000 -0400
68256+++ linux-3.0.4/net/ipv4/fib_semantics.c 2011-08-23 21:47:56.000000000 -0400
68257@@ -691,7 +691,7 @@ __be32 fib_info_update_nh_saddr(struct n
68258 nh->nh_saddr = inet_select_addr(nh->nh_dev,
68259 nh->nh_gw,
68260 nh->nh_parent->fib_scope);
68261- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
68262+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
68263
68264 return nh->nh_saddr;
68265 }
68266diff -urNp linux-3.0.4/net/ipv4/inet_diag.c linux-3.0.4/net/ipv4/inet_diag.c
68267--- linux-3.0.4/net/ipv4/inet_diag.c 2011-07-21 22:17:23.000000000 -0400
68268+++ linux-3.0.4/net/ipv4/inet_diag.c 2011-08-23 21:48:14.000000000 -0400
68269@@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct soc
68270 r->idiag_retrans = 0;
68271
68272 r->id.idiag_if = sk->sk_bound_dev_if;
68273+
68274+#ifdef CONFIG_GRKERNSEC_HIDESYM
68275+ r->id.idiag_cookie[0] = 0;
68276+ r->id.idiag_cookie[1] = 0;
68277+#else
68278 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
68279 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
68280+#endif
68281
68282 r->id.idiag_sport = inet->inet_sport;
68283 r->id.idiag_dport = inet->inet_dport;
68284@@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct in
68285 r->idiag_family = tw->tw_family;
68286 r->idiag_retrans = 0;
68287 r->id.idiag_if = tw->tw_bound_dev_if;
68288+
68289+#ifdef CONFIG_GRKERNSEC_HIDESYM
68290+ r->id.idiag_cookie[0] = 0;
68291+ r->id.idiag_cookie[1] = 0;
68292+#else
68293 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
68294 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
68295+#endif
68296+
68297 r->id.idiag_sport = tw->tw_sport;
68298 r->id.idiag_dport = tw->tw_dport;
68299 r->id.idiag_src[0] = tw->tw_rcv_saddr;
68300@@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk
68301 if (sk == NULL)
68302 goto unlock;
68303
68304+#ifndef CONFIG_GRKERNSEC_HIDESYM
68305 err = -ESTALE;
68306 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
68307 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
68308 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
68309 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
68310 goto out;
68311+#endif
68312
68313 err = -ENOMEM;
68314 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
68315@@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
68316 r->idiag_retrans = req->retrans;
68317
68318 r->id.idiag_if = sk->sk_bound_dev_if;
68319+
68320+#ifdef CONFIG_GRKERNSEC_HIDESYM
68321+ r->id.idiag_cookie[0] = 0;
68322+ r->id.idiag_cookie[1] = 0;
68323+#else
68324 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
68325 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
68326+#endif
68327
68328 tmo = req->expires - jiffies;
68329 if (tmo < 0)
68330diff -urNp linux-3.0.4/net/ipv4/inet_hashtables.c linux-3.0.4/net/ipv4/inet_hashtables.c
68331--- linux-3.0.4/net/ipv4/inet_hashtables.c 2011-09-02 18:11:21.000000000 -0400
68332+++ linux-3.0.4/net/ipv4/inet_hashtables.c 2011-08-23 21:55:24.000000000 -0400
68333@@ -18,12 +18,15 @@
68334 #include <linux/sched.h>
68335 #include <linux/slab.h>
68336 #include <linux/wait.h>
68337+#include <linux/security.h>
68338
68339 #include <net/inet_connection_sock.h>
68340 #include <net/inet_hashtables.h>
68341 #include <net/secure_seq.h>
68342 #include <net/ip.h>
68343
68344+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
68345+
68346 /*
68347 * Allocate and initialize a new local port bind bucket.
68348 * The bindhash mutex for snum's hash chain must be held here.
68349@@ -530,6 +533,8 @@ ok:
68350 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
68351 spin_unlock(&head->lock);
68352
68353+ gr_update_task_in_ip_table(current, inet_sk(sk));
68354+
68355 if (tw) {
68356 inet_twsk_deschedule(tw, death_row);
68357 while (twrefcnt) {
68358diff -urNp linux-3.0.4/net/ipv4/inetpeer.c linux-3.0.4/net/ipv4/inetpeer.c
68359--- linux-3.0.4/net/ipv4/inetpeer.c 2011-09-02 18:11:21.000000000 -0400
68360+++ linux-3.0.4/net/ipv4/inetpeer.c 2011-08-23 21:48:14.000000000 -0400
68361@@ -481,6 +481,8 @@ struct inet_peer *inet_getpeer(struct in
68362 unsigned int sequence;
68363 int invalidated, newrefcnt = 0;
68364
68365+ pax_track_stack();
68366+
68367 /* Look up for the address quickly, lockless.
68368 * Because of a concurrent writer, we might not find an existing entry.
68369 */
68370@@ -517,8 +519,8 @@ found: /* The existing node has been fo
68371 if (p) {
68372 p->daddr = *daddr;
68373 atomic_set(&p->refcnt, 1);
68374- atomic_set(&p->rid, 0);
68375- atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
68376+ atomic_set_unchecked(&p->rid, 0);
68377+ atomic_set_unchecked(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
68378 p->tcp_ts_stamp = 0;
68379 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
68380 p->rate_tokens = 0;
68381diff -urNp linux-3.0.4/net/ipv4/ip_fragment.c linux-3.0.4/net/ipv4/ip_fragment.c
68382--- linux-3.0.4/net/ipv4/ip_fragment.c 2011-07-21 22:17:23.000000000 -0400
68383+++ linux-3.0.4/net/ipv4/ip_fragment.c 2011-08-23 21:47:56.000000000 -0400
68384@@ -315,7 +315,7 @@ static inline int ip_frag_too_far(struct
68385 return 0;
68386
68387 start = qp->rid;
68388- end = atomic_inc_return(&peer->rid);
68389+ end = atomic_inc_return_unchecked(&peer->rid);
68390 qp->rid = end;
68391
68392 rc = qp->q.fragments && (end - start) > max;
68393diff -urNp linux-3.0.4/net/ipv4/ip_sockglue.c linux-3.0.4/net/ipv4/ip_sockglue.c
68394--- linux-3.0.4/net/ipv4/ip_sockglue.c 2011-07-21 22:17:23.000000000 -0400
68395+++ linux-3.0.4/net/ipv4/ip_sockglue.c 2011-08-23 21:48:14.000000000 -0400
68396@@ -1073,6 +1073,8 @@ static int do_ip_getsockopt(struct sock
68397 int val;
68398 int len;
68399
68400+ pax_track_stack();
68401+
68402 if (level != SOL_IP)
68403 return -EOPNOTSUPP;
68404
68405@@ -1110,7 +1112,8 @@ static int do_ip_getsockopt(struct sock
68406 len = min_t(unsigned int, len, opt->optlen);
68407 if (put_user(len, optlen))
68408 return -EFAULT;
68409- if (copy_to_user(optval, opt->__data, len))
68410+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
68411+ copy_to_user(optval, opt->__data, len))
68412 return -EFAULT;
68413 return 0;
68414 }
68415diff -urNp linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c
68416--- linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-07-21 22:17:23.000000000 -0400
68417+++ linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-08-23 21:47:56.000000000 -0400
68418@@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(
68419
68420 *len = 0;
68421
68422- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
68423+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
68424 if (*octets == NULL) {
68425 if (net_ratelimit())
68426 pr_notice("OOM in bsalg (%d)\n", __LINE__);
68427diff -urNp linux-3.0.4/net/ipv4/ping.c linux-3.0.4/net/ipv4/ping.c
68428--- linux-3.0.4/net/ipv4/ping.c 2011-07-21 22:17:23.000000000 -0400
68429+++ linux-3.0.4/net/ipv4/ping.c 2011-08-23 21:47:56.000000000 -0400
68430@@ -837,7 +837,7 @@ static void ping_format_sock(struct sock
68431 sk_rmem_alloc_get(sp),
68432 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
68433 atomic_read(&sp->sk_refcnt), sp,
68434- atomic_read(&sp->sk_drops), len);
68435+ atomic_read_unchecked(&sp->sk_drops), len);
68436 }
68437
68438 static int ping_seq_show(struct seq_file *seq, void *v)
68439diff -urNp linux-3.0.4/net/ipv4/raw.c linux-3.0.4/net/ipv4/raw.c
68440--- linux-3.0.4/net/ipv4/raw.c 2011-07-21 22:17:23.000000000 -0400
68441+++ linux-3.0.4/net/ipv4/raw.c 2011-08-23 21:48:14.000000000 -0400
68442@@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk,
68443 int raw_rcv(struct sock *sk, struct sk_buff *skb)
68444 {
68445 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
68446- atomic_inc(&sk->sk_drops);
68447+ atomic_inc_unchecked(&sk->sk_drops);
68448 kfree_skb(skb);
68449 return NET_RX_DROP;
68450 }
68451@@ -736,16 +736,20 @@ static int raw_init(struct sock *sk)
68452
68453 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
68454 {
68455+ struct icmp_filter filter;
68456+
68457 if (optlen > sizeof(struct icmp_filter))
68458 optlen = sizeof(struct icmp_filter);
68459- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
68460+ if (copy_from_user(&filter, optval, optlen))
68461 return -EFAULT;
68462+ raw_sk(sk)->filter = filter;
68463 return 0;
68464 }
68465
68466 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
68467 {
68468 int len, ret = -EFAULT;
68469+ struct icmp_filter filter;
68470
68471 if (get_user(len, optlen))
68472 goto out;
68473@@ -755,8 +759,9 @@ static int raw_geticmpfilter(struct sock
68474 if (len > sizeof(struct icmp_filter))
68475 len = sizeof(struct icmp_filter);
68476 ret = -EFAULT;
68477- if (put_user(len, optlen) ||
68478- copy_to_user(optval, &raw_sk(sk)->filter, len))
68479+ filter = raw_sk(sk)->filter;
68480+ if (put_user(len, optlen) || len > sizeof filter ||
68481+ copy_to_user(optval, &filter, len))
68482 goto out;
68483 ret = 0;
68484 out: return ret;
68485@@ -984,7 +989,13 @@ static void raw_sock_seq_show(struct seq
68486 sk_wmem_alloc_get(sp),
68487 sk_rmem_alloc_get(sp),
68488 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
68489- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
68490+ atomic_read(&sp->sk_refcnt),
68491+#ifdef CONFIG_GRKERNSEC_HIDESYM
68492+ NULL,
68493+#else
68494+ sp,
68495+#endif
68496+ atomic_read_unchecked(&sp->sk_drops));
68497 }
68498
68499 static int raw_seq_show(struct seq_file *seq, void *v)
68500diff -urNp linux-3.0.4/net/ipv4/route.c linux-3.0.4/net/ipv4/route.c
68501--- linux-3.0.4/net/ipv4/route.c 2011-09-02 18:11:21.000000000 -0400
68502+++ linux-3.0.4/net/ipv4/route.c 2011-08-23 21:47:56.000000000 -0400
68503@@ -304,7 +304,7 @@ static inline unsigned int rt_hash(__be3
68504
68505 static inline int rt_genid(struct net *net)
68506 {
68507- return atomic_read(&net->ipv4.rt_genid);
68508+ return atomic_read_unchecked(&net->ipv4.rt_genid);
68509 }
68510
68511 #ifdef CONFIG_PROC_FS
68512@@ -833,7 +833,7 @@ static void rt_cache_invalidate(struct n
68513 unsigned char shuffle;
68514
68515 get_random_bytes(&shuffle, sizeof(shuffle));
68516- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
68517+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
68518 }
68519
68520 /*
68521@@ -2834,7 +2834,7 @@ static int rt_fill_info(struct net *net,
68522 error = rt->dst.error;
68523 if (peer) {
68524 inet_peer_refcheck(rt->peer);
68525- id = atomic_read(&peer->ip_id_count) & 0xffff;
68526+ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
68527 if (peer->tcp_ts_stamp) {
68528 ts = peer->tcp_ts;
68529 tsage = get_seconds() - peer->tcp_ts_stamp;
68530diff -urNp linux-3.0.4/net/ipv4/tcp.c linux-3.0.4/net/ipv4/tcp.c
68531--- linux-3.0.4/net/ipv4/tcp.c 2011-07-21 22:17:23.000000000 -0400
68532+++ linux-3.0.4/net/ipv4/tcp.c 2011-08-23 21:48:14.000000000 -0400
68533@@ -2122,6 +2122,8 @@ static int do_tcp_setsockopt(struct sock
68534 int val;
68535 int err = 0;
68536
68537+ pax_track_stack();
68538+
68539 /* These are data/string values, all the others are ints */
68540 switch (optname) {
68541 case TCP_CONGESTION: {
68542@@ -2501,6 +2503,8 @@ static int do_tcp_getsockopt(struct sock
68543 struct tcp_sock *tp = tcp_sk(sk);
68544 int val, len;
68545
68546+ pax_track_stack();
68547+
68548 if (get_user(len, optlen))
68549 return -EFAULT;
68550
68551diff -urNp linux-3.0.4/net/ipv4/tcp_ipv4.c linux-3.0.4/net/ipv4/tcp_ipv4.c
68552--- linux-3.0.4/net/ipv4/tcp_ipv4.c 2011-09-02 18:11:21.000000000 -0400
68553+++ linux-3.0.4/net/ipv4/tcp_ipv4.c 2011-08-23 21:48:14.000000000 -0400
68554@@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
68555 int sysctl_tcp_low_latency __read_mostly;
68556 EXPORT_SYMBOL(sysctl_tcp_low_latency);
68557
68558+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68559+extern int grsec_enable_blackhole;
68560+#endif
68561
68562 #ifdef CONFIG_TCP_MD5SIG
68563 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
68564@@ -1607,6 +1610,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
68565 return 0;
68566
68567 reset:
68568+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68569+ if (!grsec_enable_blackhole)
68570+#endif
68571 tcp_v4_send_reset(rsk, skb);
68572 discard:
68573 kfree_skb(skb);
68574@@ -1669,12 +1675,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
68575 TCP_SKB_CB(skb)->sacked = 0;
68576
68577 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
68578- if (!sk)
68579+ if (!sk) {
68580+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68581+ ret = 1;
68582+#endif
68583 goto no_tcp_socket;
68584-
68585+ }
68586 process:
68587- if (sk->sk_state == TCP_TIME_WAIT)
68588+ if (sk->sk_state == TCP_TIME_WAIT) {
68589+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68590+ ret = 2;
68591+#endif
68592 goto do_time_wait;
68593+ }
68594
68595 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
68596 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
68597@@ -1724,6 +1737,10 @@ no_tcp_socket:
68598 bad_packet:
68599 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
68600 } else {
68601+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68602+ if (!grsec_enable_blackhole || (ret == 1 &&
68603+ (skb->dev->flags & IFF_LOOPBACK)))
68604+#endif
68605 tcp_v4_send_reset(NULL, skb);
68606 }
68607
68608@@ -2388,7 +2405,11 @@ static void get_openreq4(struct sock *sk
68609 0, /* non standard timer */
68610 0, /* open_requests have no inode */
68611 atomic_read(&sk->sk_refcnt),
68612+#ifdef CONFIG_GRKERNSEC_HIDESYM
68613+ NULL,
68614+#else
68615 req,
68616+#endif
68617 len);
68618 }
68619
68620@@ -2438,7 +2459,12 @@ static void get_tcp4_sock(struct sock *s
68621 sock_i_uid(sk),
68622 icsk->icsk_probes_out,
68623 sock_i_ino(sk),
68624- atomic_read(&sk->sk_refcnt), sk,
68625+ atomic_read(&sk->sk_refcnt),
68626+#ifdef CONFIG_GRKERNSEC_HIDESYM
68627+ NULL,
68628+#else
68629+ sk,
68630+#endif
68631 jiffies_to_clock_t(icsk->icsk_rto),
68632 jiffies_to_clock_t(icsk->icsk_ack.ato),
68633 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
68634@@ -2466,7 +2492,13 @@ static void get_timewait4_sock(struct in
68635 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
68636 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
68637 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
68638- atomic_read(&tw->tw_refcnt), tw, len);
68639+ atomic_read(&tw->tw_refcnt),
68640+#ifdef CONFIG_GRKERNSEC_HIDESYM
68641+ NULL,
68642+#else
68643+ tw,
68644+#endif
68645+ len);
68646 }
68647
68648 #define TMPSZ 150
68649diff -urNp linux-3.0.4/net/ipv4/tcp_minisocks.c linux-3.0.4/net/ipv4/tcp_minisocks.c
68650--- linux-3.0.4/net/ipv4/tcp_minisocks.c 2011-07-21 22:17:23.000000000 -0400
68651+++ linux-3.0.4/net/ipv4/tcp_minisocks.c 2011-08-23 21:48:14.000000000 -0400
68652@@ -27,6 +27,10 @@
68653 #include <net/inet_common.h>
68654 #include <net/xfrm.h>
68655
68656+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68657+extern int grsec_enable_blackhole;
68658+#endif
68659+
68660 int sysctl_tcp_syncookies __read_mostly = 1;
68661 EXPORT_SYMBOL(sysctl_tcp_syncookies);
68662
68663@@ -745,6 +749,10 @@ listen_overflow:
68664
68665 embryonic_reset:
68666 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
68667+
68668+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68669+ if (!grsec_enable_blackhole)
68670+#endif
68671 if (!(flg & TCP_FLAG_RST))
68672 req->rsk_ops->send_reset(sk, skb);
68673
68674diff -urNp linux-3.0.4/net/ipv4/tcp_output.c linux-3.0.4/net/ipv4/tcp_output.c
68675--- linux-3.0.4/net/ipv4/tcp_output.c 2011-07-21 22:17:23.000000000 -0400
68676+++ linux-3.0.4/net/ipv4/tcp_output.c 2011-08-23 21:48:14.000000000 -0400
68677@@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct s
68678 int mss;
68679 int s_data_desired = 0;
68680
68681+ pax_track_stack();
68682+
68683 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
68684 s_data_desired = cvp->s_data_desired;
68685 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
68686diff -urNp linux-3.0.4/net/ipv4/tcp_probe.c linux-3.0.4/net/ipv4/tcp_probe.c
68687--- linux-3.0.4/net/ipv4/tcp_probe.c 2011-07-21 22:17:23.000000000 -0400
68688+++ linux-3.0.4/net/ipv4/tcp_probe.c 2011-08-23 21:47:56.000000000 -0400
68689@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file
68690 if (cnt + width >= len)
68691 break;
68692
68693- if (copy_to_user(buf + cnt, tbuf, width))
68694+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
68695 return -EFAULT;
68696 cnt += width;
68697 }
68698diff -urNp linux-3.0.4/net/ipv4/tcp_timer.c linux-3.0.4/net/ipv4/tcp_timer.c
68699--- linux-3.0.4/net/ipv4/tcp_timer.c 2011-07-21 22:17:23.000000000 -0400
68700+++ linux-3.0.4/net/ipv4/tcp_timer.c 2011-08-23 21:48:14.000000000 -0400
68701@@ -22,6 +22,10 @@
68702 #include <linux/gfp.h>
68703 #include <net/tcp.h>
68704
68705+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68706+extern int grsec_lastack_retries;
68707+#endif
68708+
68709 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
68710 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
68711 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
68712@@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock
68713 }
68714 }
68715
68716+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68717+ if ((sk->sk_state == TCP_LAST_ACK) &&
68718+ (grsec_lastack_retries > 0) &&
68719+ (grsec_lastack_retries < retry_until))
68720+ retry_until = grsec_lastack_retries;
68721+#endif
68722+
68723 if (retransmits_timed_out(sk, retry_until,
68724 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
68725 /* Has it gone just too far? */
68726diff -urNp linux-3.0.4/net/ipv4/udp.c linux-3.0.4/net/ipv4/udp.c
68727--- linux-3.0.4/net/ipv4/udp.c 2011-07-21 22:17:23.000000000 -0400
68728+++ linux-3.0.4/net/ipv4/udp.c 2011-08-23 21:48:14.000000000 -0400
68729@@ -86,6 +86,7 @@
68730 #include <linux/types.h>
68731 #include <linux/fcntl.h>
68732 #include <linux/module.h>
68733+#include <linux/security.h>
68734 #include <linux/socket.h>
68735 #include <linux/sockios.h>
68736 #include <linux/igmp.h>
68737@@ -107,6 +108,10 @@
68738 #include <net/xfrm.h>
68739 #include "udp_impl.h"
68740
68741+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68742+extern int grsec_enable_blackhole;
68743+#endif
68744+
68745 struct udp_table udp_table __read_mostly;
68746 EXPORT_SYMBOL(udp_table);
68747
68748@@ -564,6 +569,9 @@ found:
68749 return s;
68750 }
68751
68752+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
68753+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
68754+
68755 /*
68756 * This routine is called by the ICMP module when it gets some
68757 * sort of error condition. If err < 0 then the socket should
68758@@ -855,9 +863,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
68759 dport = usin->sin_port;
68760 if (dport == 0)
68761 return -EINVAL;
68762+
68763+ err = gr_search_udp_sendmsg(sk, usin);
68764+ if (err)
68765+ return err;
68766 } else {
68767 if (sk->sk_state != TCP_ESTABLISHED)
68768 return -EDESTADDRREQ;
68769+
68770+ err = gr_search_udp_sendmsg(sk, NULL);
68771+ if (err)
68772+ return err;
68773+
68774 daddr = inet->inet_daddr;
68775 dport = inet->inet_dport;
68776 /* Open fast path for connected socket.
68777@@ -1098,7 +1115,7 @@ static unsigned int first_packet_length(
68778 udp_lib_checksum_complete(skb)) {
68779 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
68780 IS_UDPLITE(sk));
68781- atomic_inc(&sk->sk_drops);
68782+ atomic_inc_unchecked(&sk->sk_drops);
68783 __skb_unlink(skb, rcvq);
68784 __skb_queue_tail(&list_kill, skb);
68785 }
68786@@ -1184,6 +1201,10 @@ try_again:
68787 if (!skb)
68788 goto out;
68789
68790+ err = gr_search_udp_recvmsg(sk, skb);
68791+ if (err)
68792+ goto out_free;
68793+
68794 ulen = skb->len - sizeof(struct udphdr);
68795 if (len > ulen)
68796 len = ulen;
68797@@ -1483,7 +1504,7 @@ int udp_queue_rcv_skb(struct sock *sk, s
68798
68799 drop:
68800 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
68801- atomic_inc(&sk->sk_drops);
68802+ atomic_inc_unchecked(&sk->sk_drops);
68803 kfree_skb(skb);
68804 return -1;
68805 }
68806@@ -1502,7 +1523,7 @@ static void flush_stack(struct sock **st
68807 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
68808
68809 if (!skb1) {
68810- atomic_inc(&sk->sk_drops);
68811+ atomic_inc_unchecked(&sk->sk_drops);
68812 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
68813 IS_UDPLITE(sk));
68814 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
68815@@ -1671,6 +1692,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
68816 goto csum_error;
68817
68818 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
68819+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68820+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
68821+#endif
68822 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
68823
68824 /*
68825@@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock
68826 sk_wmem_alloc_get(sp),
68827 sk_rmem_alloc_get(sp),
68828 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
68829- atomic_read(&sp->sk_refcnt), sp,
68830- atomic_read(&sp->sk_drops), len);
68831+ atomic_read(&sp->sk_refcnt),
68832+#ifdef CONFIG_GRKERNSEC_HIDESYM
68833+ NULL,
68834+#else
68835+ sp,
68836+#endif
68837+ atomic_read_unchecked(&sp->sk_drops), len);
68838 }
68839
68840 int udp4_seq_show(struct seq_file *seq, void *v)
68841diff -urNp linux-3.0.4/net/ipv6/inet6_connection_sock.c linux-3.0.4/net/ipv6/inet6_connection_sock.c
68842--- linux-3.0.4/net/ipv6/inet6_connection_sock.c 2011-07-21 22:17:23.000000000 -0400
68843+++ linux-3.0.4/net/ipv6/inet6_connection_sock.c 2011-08-23 21:47:56.000000000 -0400
68844@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *
68845 #ifdef CONFIG_XFRM
68846 {
68847 struct rt6_info *rt = (struct rt6_info *)dst;
68848- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
68849+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
68850 }
68851 #endif
68852 }
68853@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(
68854 #ifdef CONFIG_XFRM
68855 if (dst) {
68856 struct rt6_info *rt = (struct rt6_info *)dst;
68857- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
68858+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
68859 __sk_dst_reset(sk);
68860 dst = NULL;
68861 }
68862diff -urNp linux-3.0.4/net/ipv6/ipv6_sockglue.c linux-3.0.4/net/ipv6/ipv6_sockglue.c
68863--- linux-3.0.4/net/ipv6/ipv6_sockglue.c 2011-07-21 22:17:23.000000000 -0400
68864+++ linux-3.0.4/net/ipv6/ipv6_sockglue.c 2011-08-23 21:48:14.000000000 -0400
68865@@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct soc
68866 int val, valbool;
68867 int retv = -ENOPROTOOPT;
68868
68869+ pax_track_stack();
68870+
68871 if (optval == NULL)
68872 val=0;
68873 else {
68874@@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct soc
68875 int len;
68876 int val;
68877
68878+ pax_track_stack();
68879+
68880 if (ip6_mroute_opt(optname))
68881 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
68882
68883diff -urNp linux-3.0.4/net/ipv6/raw.c linux-3.0.4/net/ipv6/raw.c
68884--- linux-3.0.4/net/ipv6/raw.c 2011-07-21 22:17:23.000000000 -0400
68885+++ linux-3.0.4/net/ipv6/raw.c 2011-08-23 21:48:14.000000000 -0400
68886@@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct s
68887 {
68888 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
68889 skb_checksum_complete(skb)) {
68890- atomic_inc(&sk->sk_drops);
68891+ atomic_inc_unchecked(&sk->sk_drops);
68892 kfree_skb(skb);
68893 return NET_RX_DROP;
68894 }
68895@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
68896 struct raw6_sock *rp = raw6_sk(sk);
68897
68898 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
68899- atomic_inc(&sk->sk_drops);
68900+ atomic_inc_unchecked(&sk->sk_drops);
68901 kfree_skb(skb);
68902 return NET_RX_DROP;
68903 }
68904@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
68905
68906 if (inet->hdrincl) {
68907 if (skb_checksum_complete(skb)) {
68908- atomic_inc(&sk->sk_drops);
68909+ atomic_inc_unchecked(&sk->sk_drops);
68910 kfree_skb(skb);
68911 return NET_RX_DROP;
68912 }
68913@@ -601,7 +601,7 @@ out:
68914 return err;
68915 }
68916
68917-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
68918+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
68919 struct flowi6 *fl6, struct dst_entry **dstp,
68920 unsigned int flags)
68921 {
68922@@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *i
68923 u16 proto;
68924 int err;
68925
68926+ pax_track_stack();
68927+
68928 /* Rough check on arithmetic overflow,
68929 better check is made in ip6_append_data().
68930 */
68931@@ -909,12 +911,15 @@ do_confirm:
68932 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
68933 char __user *optval, int optlen)
68934 {
68935+ struct icmp6_filter filter;
68936+
68937 switch (optname) {
68938 case ICMPV6_FILTER:
68939 if (optlen > sizeof(struct icmp6_filter))
68940 optlen = sizeof(struct icmp6_filter);
68941- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
68942+ if (copy_from_user(&filter, optval, optlen))
68943 return -EFAULT;
68944+ raw6_sk(sk)->filter = filter;
68945 return 0;
68946 default:
68947 return -ENOPROTOOPT;
68948@@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct so
68949 char __user *optval, int __user *optlen)
68950 {
68951 int len;
68952+ struct icmp6_filter filter;
68953
68954 switch (optname) {
68955 case ICMPV6_FILTER:
68956@@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct so
68957 len = sizeof(struct icmp6_filter);
68958 if (put_user(len, optlen))
68959 return -EFAULT;
68960- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
68961+ filter = raw6_sk(sk)->filter;
68962+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
68963 return -EFAULT;
68964 return 0;
68965 default:
68966@@ -1252,7 +1259,13 @@ static void raw6_sock_seq_show(struct se
68967 0, 0L, 0,
68968 sock_i_uid(sp), 0,
68969 sock_i_ino(sp),
68970- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
68971+ atomic_read(&sp->sk_refcnt),
68972+#ifdef CONFIG_GRKERNSEC_HIDESYM
68973+ NULL,
68974+#else
68975+ sp,
68976+#endif
68977+ atomic_read_unchecked(&sp->sk_drops));
68978 }
68979
68980 static int raw6_seq_show(struct seq_file *seq, void *v)
68981diff -urNp linux-3.0.4/net/ipv6/tcp_ipv6.c linux-3.0.4/net/ipv6/tcp_ipv6.c
68982--- linux-3.0.4/net/ipv6/tcp_ipv6.c 2011-09-02 18:11:21.000000000 -0400
68983+++ linux-3.0.4/net/ipv6/tcp_ipv6.c 2011-08-23 21:48:14.000000000 -0400
68984@@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
68985 }
68986 #endif
68987
68988+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68989+extern int grsec_enable_blackhole;
68990+#endif
68991+
68992 static void tcp_v6_hash(struct sock *sk)
68993 {
68994 if (sk->sk_state != TCP_CLOSE) {
68995@@ -1662,6 +1666,9 @@ static int tcp_v6_do_rcv(struct sock *sk
68996 return 0;
68997
68998 reset:
68999+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69000+ if (!grsec_enable_blackhole)
69001+#endif
69002 tcp_v6_send_reset(sk, skb);
69003 discard:
69004 if (opt_skb)
69005@@ -1741,12 +1748,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
69006 TCP_SKB_CB(skb)->sacked = 0;
69007
69008 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
69009- if (!sk)
69010+ if (!sk) {
69011+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69012+ ret = 1;
69013+#endif
69014 goto no_tcp_socket;
69015+ }
69016
69017 process:
69018- if (sk->sk_state == TCP_TIME_WAIT)
69019+ if (sk->sk_state == TCP_TIME_WAIT) {
69020+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69021+ ret = 2;
69022+#endif
69023 goto do_time_wait;
69024+ }
69025
69026 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
69027 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
69028@@ -1794,6 +1809,10 @@ no_tcp_socket:
69029 bad_packet:
69030 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
69031 } else {
69032+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69033+ if (!grsec_enable_blackhole || (ret == 1 &&
69034+ (skb->dev->flags & IFF_LOOPBACK)))
69035+#endif
69036 tcp_v6_send_reset(NULL, skb);
69037 }
69038
69039@@ -2054,7 +2073,13 @@ static void get_openreq6(struct seq_file
69040 uid,
69041 0, /* non standard timer */
69042 0, /* open_requests have no inode */
69043- 0, req);
69044+ 0,
69045+#ifdef CONFIG_GRKERNSEC_HIDESYM
69046+ NULL
69047+#else
69048+ req
69049+#endif
69050+ );
69051 }
69052
69053 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
69054@@ -2104,7 +2129,12 @@ static void get_tcp6_sock(struct seq_fil
69055 sock_i_uid(sp),
69056 icsk->icsk_probes_out,
69057 sock_i_ino(sp),
69058- atomic_read(&sp->sk_refcnt), sp,
69059+ atomic_read(&sp->sk_refcnt),
69060+#ifdef CONFIG_GRKERNSEC_HIDESYM
69061+ NULL,
69062+#else
69063+ sp,
69064+#endif
69065 jiffies_to_clock_t(icsk->icsk_rto),
69066 jiffies_to_clock_t(icsk->icsk_ack.ato),
69067 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
69068@@ -2139,7 +2169,13 @@ static void get_timewait6_sock(struct se
69069 dest->s6_addr32[2], dest->s6_addr32[3], destp,
69070 tw->tw_substate, 0, 0,
69071 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
69072- atomic_read(&tw->tw_refcnt), tw);
69073+ atomic_read(&tw->tw_refcnt),
69074+#ifdef CONFIG_GRKERNSEC_HIDESYM
69075+ NULL
69076+#else
69077+ tw
69078+#endif
69079+ );
69080 }
69081
69082 static int tcp6_seq_show(struct seq_file *seq, void *v)
69083diff -urNp linux-3.0.4/net/ipv6/udp.c linux-3.0.4/net/ipv6/udp.c
69084--- linux-3.0.4/net/ipv6/udp.c 2011-09-02 18:11:21.000000000 -0400
69085+++ linux-3.0.4/net/ipv6/udp.c 2011-08-23 21:48:14.000000000 -0400
69086@@ -50,6 +50,10 @@
69087 #include <linux/seq_file.h>
69088 #include "udp_impl.h"
69089
69090+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69091+extern int grsec_enable_blackhole;
69092+#endif
69093+
69094 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
69095 {
69096 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
69097@@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
69098
69099 return 0;
69100 drop:
69101- atomic_inc(&sk->sk_drops);
69102+ atomic_inc_unchecked(&sk->sk_drops);
69103 drop_no_sk_drops_inc:
69104 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
69105 kfree_skb(skb);
69106@@ -624,7 +628,7 @@ static void flush_stack(struct sock **st
69107 continue;
69108 }
69109 drop:
69110- atomic_inc(&sk->sk_drops);
69111+ atomic_inc_unchecked(&sk->sk_drops);
69112 UDP6_INC_STATS_BH(sock_net(sk),
69113 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
69114 UDP6_INC_STATS_BH(sock_net(sk),
69115@@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
69116 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
69117 proto == IPPROTO_UDPLITE);
69118
69119+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69120+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
69121+#endif
69122 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
69123
69124 kfree_skb(skb);
69125@@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb,
69126 if (!sock_owned_by_user(sk))
69127 udpv6_queue_rcv_skb(sk, skb);
69128 else if (sk_add_backlog(sk, skb)) {
69129- atomic_inc(&sk->sk_drops);
69130+ atomic_inc_unchecked(&sk->sk_drops);
69131 bh_unlock_sock(sk);
69132 sock_put(sk);
69133 goto discard;
69134@@ -1406,8 +1413,13 @@ static void udp6_sock_seq_show(struct se
69135 0, 0L, 0,
69136 sock_i_uid(sp), 0,
69137 sock_i_ino(sp),
69138- atomic_read(&sp->sk_refcnt), sp,
69139- atomic_read(&sp->sk_drops));
69140+ atomic_read(&sp->sk_refcnt),
69141+#ifdef CONFIG_GRKERNSEC_HIDESYM
69142+ NULL,
69143+#else
69144+ sp,
69145+#endif
69146+ atomic_read_unchecked(&sp->sk_drops));
69147 }
69148
69149 int udp6_seq_show(struct seq_file *seq, void *v)
69150diff -urNp linux-3.0.4/net/irda/ircomm/ircomm_tty.c linux-3.0.4/net/irda/ircomm/ircomm_tty.c
69151--- linux-3.0.4/net/irda/ircomm/ircomm_tty.c 2011-07-21 22:17:23.000000000 -0400
69152+++ linux-3.0.4/net/irda/ircomm/ircomm_tty.c 2011-08-23 21:47:56.000000000 -0400
69153@@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(st
69154 add_wait_queue(&self->open_wait, &wait);
69155
69156 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
69157- __FILE__,__LINE__, tty->driver->name, self->open_count );
69158+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
69159
69160 /* As far as I can see, we protect open_count - Jean II */
69161 spin_lock_irqsave(&self->spinlock, flags);
69162 if (!tty_hung_up_p(filp)) {
69163 extra_count = 1;
69164- self->open_count--;
69165+ local_dec(&self->open_count);
69166 }
69167 spin_unlock_irqrestore(&self->spinlock, flags);
69168- self->blocked_open++;
69169+ local_inc(&self->blocked_open);
69170
69171 while (1) {
69172 if (tty->termios->c_cflag & CBAUD) {
69173@@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(st
69174 }
69175
69176 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
69177- __FILE__,__LINE__, tty->driver->name, self->open_count );
69178+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
69179
69180 schedule();
69181 }
69182@@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(st
69183 if (extra_count) {
69184 /* ++ is not atomic, so this should be protected - Jean II */
69185 spin_lock_irqsave(&self->spinlock, flags);
69186- self->open_count++;
69187+ local_inc(&self->open_count);
69188 spin_unlock_irqrestore(&self->spinlock, flags);
69189 }
69190- self->blocked_open--;
69191+ local_dec(&self->blocked_open);
69192
69193 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
69194- __FILE__,__LINE__, tty->driver->name, self->open_count);
69195+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
69196
69197 if (!retval)
69198 self->flags |= ASYNC_NORMAL_ACTIVE;
69199@@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_st
69200 }
69201 /* ++ is not atomic, so this should be protected - Jean II */
69202 spin_lock_irqsave(&self->spinlock, flags);
69203- self->open_count++;
69204+ local_inc(&self->open_count);
69205
69206 tty->driver_data = self;
69207 self->tty = tty;
69208 spin_unlock_irqrestore(&self->spinlock, flags);
69209
69210 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
69211- self->line, self->open_count);
69212+ self->line, local_read(&self->open_count));
69213
69214 /* Not really used by us, but lets do it anyway */
69215 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
69216@@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_
69217 return;
69218 }
69219
69220- if ((tty->count == 1) && (self->open_count != 1)) {
69221+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
69222 /*
69223 * Uh, oh. tty->count is 1, which means that the tty
69224 * structure will be freed. state->count should always
69225@@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_
69226 */
69227 IRDA_DEBUG(0, "%s(), bad serial port count; "
69228 "tty->count is 1, state->count is %d\n", __func__ ,
69229- self->open_count);
69230- self->open_count = 1;
69231+ local_read(&self->open_count));
69232+ local_set(&self->open_count, 1);
69233 }
69234
69235- if (--self->open_count < 0) {
69236+ if (local_dec_return(&self->open_count) < 0) {
69237 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
69238- __func__, self->line, self->open_count);
69239- self->open_count = 0;
69240+ __func__, self->line, local_read(&self->open_count));
69241+ local_set(&self->open_count, 0);
69242 }
69243- if (self->open_count) {
69244+ if (local_read(&self->open_count)) {
69245 spin_unlock_irqrestore(&self->spinlock, flags);
69246
69247 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
69248@@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_
69249 tty->closing = 0;
69250 self->tty = NULL;
69251
69252- if (self->blocked_open) {
69253+ if (local_read(&self->blocked_open)) {
69254 if (self->close_delay)
69255 schedule_timeout_interruptible(self->close_delay);
69256 wake_up_interruptible(&self->open_wait);
69257@@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty
69258 spin_lock_irqsave(&self->spinlock, flags);
69259 self->flags &= ~ASYNC_NORMAL_ACTIVE;
69260 self->tty = NULL;
69261- self->open_count = 0;
69262+ local_set(&self->open_count, 0);
69263 spin_unlock_irqrestore(&self->spinlock, flags);
69264
69265 wake_up_interruptible(&self->open_wait);
69266@@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct
69267 seq_putc(m, '\n');
69268
69269 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
69270- seq_printf(m, "Open count: %d\n", self->open_count);
69271+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
69272 seq_printf(m, "Max data size: %d\n", self->max_data_size);
69273 seq_printf(m, "Max header size: %d\n", self->max_header_size);
69274
69275diff -urNp linux-3.0.4/net/iucv/af_iucv.c linux-3.0.4/net/iucv/af_iucv.c
69276--- linux-3.0.4/net/iucv/af_iucv.c 2011-07-21 22:17:23.000000000 -0400
69277+++ linux-3.0.4/net/iucv/af_iucv.c 2011-08-23 21:47:56.000000000 -0400
69278@@ -648,10 +648,10 @@ static int iucv_sock_autobind(struct soc
69279
69280 write_lock_bh(&iucv_sk_list.lock);
69281
69282- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
69283+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
69284 while (__iucv_get_sock_by_name(name)) {
69285 sprintf(name, "%08x",
69286- atomic_inc_return(&iucv_sk_list.autobind_name));
69287+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
69288 }
69289
69290 write_unlock_bh(&iucv_sk_list.lock);
69291diff -urNp linux-3.0.4/net/key/af_key.c linux-3.0.4/net/key/af_key.c
69292--- linux-3.0.4/net/key/af_key.c 2011-07-21 22:17:23.000000000 -0400
69293+++ linux-3.0.4/net/key/af_key.c 2011-08-23 21:48:14.000000000 -0400
69294@@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk
69295 struct xfrm_migrate m[XFRM_MAX_DEPTH];
69296 struct xfrm_kmaddress k;
69297
69298+ pax_track_stack();
69299+
69300 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
69301 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
69302 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
69303@@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(stru
69304 static u32 get_acqseq(void)
69305 {
69306 u32 res;
69307- static atomic_t acqseq;
69308+ static atomic_unchecked_t acqseq;
69309
69310 do {
69311- res = atomic_inc_return(&acqseq);
69312+ res = atomic_inc_return_unchecked(&acqseq);
69313 } while (!res);
69314 return res;
69315 }
69316diff -urNp linux-3.0.4/net/lapb/lapb_iface.c linux-3.0.4/net/lapb/lapb_iface.c
69317--- linux-3.0.4/net/lapb/lapb_iface.c 2011-07-21 22:17:23.000000000 -0400
69318+++ linux-3.0.4/net/lapb/lapb_iface.c 2011-08-23 21:47:56.000000000 -0400
69319@@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev
69320 goto out;
69321
69322 lapb->dev = dev;
69323- lapb->callbacks = *callbacks;
69324+ lapb->callbacks = callbacks;
69325
69326 __lapb_insert_cb(lapb);
69327
69328@@ -380,32 +380,32 @@ int lapb_data_received(struct net_device
69329
69330 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
69331 {
69332- if (lapb->callbacks.connect_confirmation)
69333- lapb->callbacks.connect_confirmation(lapb->dev, reason);
69334+ if (lapb->callbacks->connect_confirmation)
69335+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
69336 }
69337
69338 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
69339 {
69340- if (lapb->callbacks.connect_indication)
69341- lapb->callbacks.connect_indication(lapb->dev, reason);
69342+ if (lapb->callbacks->connect_indication)
69343+ lapb->callbacks->connect_indication(lapb->dev, reason);
69344 }
69345
69346 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
69347 {
69348- if (lapb->callbacks.disconnect_confirmation)
69349- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
69350+ if (lapb->callbacks->disconnect_confirmation)
69351+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
69352 }
69353
69354 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
69355 {
69356- if (lapb->callbacks.disconnect_indication)
69357- lapb->callbacks.disconnect_indication(lapb->dev, reason);
69358+ if (lapb->callbacks->disconnect_indication)
69359+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
69360 }
69361
69362 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
69363 {
69364- if (lapb->callbacks.data_indication)
69365- return lapb->callbacks.data_indication(lapb->dev, skb);
69366+ if (lapb->callbacks->data_indication)
69367+ return lapb->callbacks->data_indication(lapb->dev, skb);
69368
69369 kfree_skb(skb);
69370 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
69371@@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *l
69372 {
69373 int used = 0;
69374
69375- if (lapb->callbacks.data_transmit) {
69376- lapb->callbacks.data_transmit(lapb->dev, skb);
69377+ if (lapb->callbacks->data_transmit) {
69378+ lapb->callbacks->data_transmit(lapb->dev, skb);
69379 used = 1;
69380 }
69381
69382diff -urNp linux-3.0.4/net/mac80211/debugfs_sta.c linux-3.0.4/net/mac80211/debugfs_sta.c
69383--- linux-3.0.4/net/mac80211/debugfs_sta.c 2011-07-21 22:17:23.000000000 -0400
69384+++ linux-3.0.4/net/mac80211/debugfs_sta.c 2011-08-23 21:48:14.000000000 -0400
69385@@ -140,6 +140,8 @@ static ssize_t sta_agg_status_read(struc
69386 struct tid_ampdu_rx *tid_rx;
69387 struct tid_ampdu_tx *tid_tx;
69388
69389+ pax_track_stack();
69390+
69391 rcu_read_lock();
69392
69393 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
69394@@ -240,6 +242,8 @@ static ssize_t sta_ht_capa_read(struct f
69395 struct sta_info *sta = file->private_data;
69396 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
69397
69398+ pax_track_stack();
69399+
69400 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
69401 htc->ht_supported ? "" : "not ");
69402 if (htc->ht_supported) {
69403diff -urNp linux-3.0.4/net/mac80211/ieee80211_i.h linux-3.0.4/net/mac80211/ieee80211_i.h
69404--- linux-3.0.4/net/mac80211/ieee80211_i.h 2011-07-21 22:17:23.000000000 -0400
69405+++ linux-3.0.4/net/mac80211/ieee80211_i.h 2011-08-23 21:47:56.000000000 -0400
69406@@ -27,6 +27,7 @@
69407 #include <net/ieee80211_radiotap.h>
69408 #include <net/cfg80211.h>
69409 #include <net/mac80211.h>
69410+#include <asm/local.h>
69411 #include "key.h"
69412 #include "sta_info.h"
69413
69414@@ -721,7 +722,7 @@ struct ieee80211_local {
69415 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
69416 spinlock_t queue_stop_reason_lock;
69417
69418- int open_count;
69419+ local_t open_count;
69420 int monitors, cooked_mntrs;
69421 /* number of interfaces with corresponding FIF_ flags */
69422 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
69423diff -urNp linux-3.0.4/net/mac80211/iface.c linux-3.0.4/net/mac80211/iface.c
69424--- linux-3.0.4/net/mac80211/iface.c 2011-09-02 18:11:21.000000000 -0400
69425+++ linux-3.0.4/net/mac80211/iface.c 2011-08-23 21:47:56.000000000 -0400
69426@@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_
69427 break;
69428 }
69429
69430- if (local->open_count == 0) {
69431+ if (local_read(&local->open_count) == 0) {
69432 res = drv_start(local);
69433 if (res)
69434 goto err_del_bss;
69435@@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_
69436 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
69437
69438 if (!is_valid_ether_addr(dev->dev_addr)) {
69439- if (!local->open_count)
69440+ if (!local_read(&local->open_count))
69441 drv_stop(local);
69442 return -EADDRNOTAVAIL;
69443 }
69444@@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_
69445 mutex_unlock(&local->mtx);
69446
69447 if (coming_up)
69448- local->open_count++;
69449+ local_inc(&local->open_count);
69450
69451 if (hw_reconf_flags) {
69452 ieee80211_hw_config(local, hw_reconf_flags);
69453@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_
69454 err_del_interface:
69455 drv_remove_interface(local, &sdata->vif);
69456 err_stop:
69457- if (!local->open_count)
69458+ if (!local_read(&local->open_count))
69459 drv_stop(local);
69460 err_del_bss:
69461 sdata->bss = NULL;
69462@@ -475,7 +475,7 @@ static void ieee80211_do_stop(struct iee
69463 }
69464
69465 if (going_down)
69466- local->open_count--;
69467+ local_dec(&local->open_count);
69468
69469 switch (sdata->vif.type) {
69470 case NL80211_IFTYPE_AP_VLAN:
69471@@ -534,7 +534,7 @@ static void ieee80211_do_stop(struct iee
69472
69473 ieee80211_recalc_ps(local, -1);
69474
69475- if (local->open_count == 0) {
69476+ if (local_read(&local->open_count) == 0) {
69477 if (local->ops->napi_poll)
69478 napi_disable(&local->napi);
69479 ieee80211_clear_tx_pending(local);
69480diff -urNp linux-3.0.4/net/mac80211/main.c linux-3.0.4/net/mac80211/main.c
69481--- linux-3.0.4/net/mac80211/main.c 2011-07-21 22:17:23.000000000 -0400
69482+++ linux-3.0.4/net/mac80211/main.c 2011-08-23 21:47:56.000000000 -0400
69483@@ -209,7 +209,7 @@ int ieee80211_hw_config(struct ieee80211
69484 local->hw.conf.power_level = power;
69485 }
69486
69487- if (changed && local->open_count) {
69488+ if (changed && local_read(&local->open_count)) {
69489 ret = drv_config(local, changed);
69490 /*
69491 * Goal:
69492diff -urNp linux-3.0.4/net/mac80211/mlme.c linux-3.0.4/net/mac80211/mlme.c
69493--- linux-3.0.4/net/mac80211/mlme.c 2011-09-02 18:11:21.000000000 -0400
69494+++ linux-3.0.4/net/mac80211/mlme.c 2011-08-23 21:48:14.000000000 -0400
69495@@ -1444,6 +1444,8 @@ static bool ieee80211_assoc_success(stru
69496 bool have_higher_than_11mbit = false;
69497 u16 ap_ht_cap_flags;
69498
69499+ pax_track_stack();
69500+
69501 /* AssocResp and ReassocResp have identical structure */
69502
69503 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
69504diff -urNp linux-3.0.4/net/mac80211/pm.c linux-3.0.4/net/mac80211/pm.c
69505--- linux-3.0.4/net/mac80211/pm.c 2011-07-21 22:17:23.000000000 -0400
69506+++ linux-3.0.4/net/mac80211/pm.c 2011-08-23 21:47:56.000000000 -0400
69507@@ -47,7 +47,7 @@ int __ieee80211_suspend(struct ieee80211
69508 cancel_work_sync(&local->dynamic_ps_enable_work);
69509 del_timer_sync(&local->dynamic_ps_timer);
69510
69511- local->wowlan = wowlan && local->open_count;
69512+ local->wowlan = wowlan && local_read(&local->open_count);
69513 if (local->wowlan) {
69514 int err = drv_suspend(local, wowlan);
69515 if (err) {
69516@@ -111,7 +111,7 @@ int __ieee80211_suspend(struct ieee80211
69517 }
69518
69519 /* stop hardware - this must stop RX */
69520- if (local->open_count)
69521+ if (local_read(&local->open_count))
69522 ieee80211_stop_device(local);
69523
69524 suspend:
69525diff -urNp linux-3.0.4/net/mac80211/rate.c linux-3.0.4/net/mac80211/rate.c
69526--- linux-3.0.4/net/mac80211/rate.c 2011-07-21 22:17:23.000000000 -0400
69527+++ linux-3.0.4/net/mac80211/rate.c 2011-08-23 21:47:56.000000000 -0400
69528@@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct
69529
69530 ASSERT_RTNL();
69531
69532- if (local->open_count)
69533+ if (local_read(&local->open_count))
69534 return -EBUSY;
69535
69536 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
69537diff -urNp linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c
69538--- linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c 2011-07-21 22:17:23.000000000 -0400
69539+++ linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c 2011-08-23 21:47:56.000000000 -0400
69540@@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r
69541
69542 spin_unlock_irqrestore(&events->lock, status);
69543
69544- if (copy_to_user(buf, pb, p))
69545+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
69546 return -EFAULT;
69547
69548 return p;
69549diff -urNp linux-3.0.4/net/mac80211/util.c linux-3.0.4/net/mac80211/util.c
69550--- linux-3.0.4/net/mac80211/util.c 2011-07-21 22:17:23.000000000 -0400
69551+++ linux-3.0.4/net/mac80211/util.c 2011-08-23 21:47:56.000000000 -0400
69552@@ -1147,7 +1147,7 @@ int ieee80211_reconfig(struct ieee80211_
69553 #endif
69554
69555 /* restart hardware */
69556- if (local->open_count) {
69557+ if (local_read(&local->open_count)) {
69558 /*
69559 * Upon resume hardware can sometimes be goofy due to
69560 * various platform / driver / bus issues, so restarting
69561diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c
69562--- linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c 2011-07-21 22:17:23.000000000 -0400
69563+++ linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c 2011-08-23 21:47:56.000000000 -0400
69564@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
69565 /* Increase the refcnt counter of the dest */
69566 atomic_inc(&dest->refcnt);
69567
69568- conn_flags = atomic_read(&dest->conn_flags);
69569+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
69570 if (cp->protocol != IPPROTO_UDP)
69571 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
69572 /* Bind with the destination and its corresponding transmitter */
69573@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_p
69574 atomic_set(&cp->refcnt, 1);
69575
69576 atomic_set(&cp->n_control, 0);
69577- atomic_set(&cp->in_pkts, 0);
69578+ atomic_set_unchecked(&cp->in_pkts, 0);
69579
69580 atomic_inc(&ipvs->conn_count);
69581 if (flags & IP_VS_CONN_F_NO_CPORT)
69582@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip
69583
69584 /* Don't drop the entry if its number of incoming packets is not
69585 located in [0, 8] */
69586- i = atomic_read(&cp->in_pkts);
69587+ i = atomic_read_unchecked(&cp->in_pkts);
69588 if (i > 8 || i < 0) return 0;
69589
69590 if (!todrop_rate[i]) return 0;
69591diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c
69592--- linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c 2011-07-21 22:17:23.000000000 -0400
69593+++ linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c 2011-08-23 21:47:56.000000000 -0400
69594@@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *sv
69595 ret = cp->packet_xmit(skb, cp, pd->pp);
69596 /* do not touch skb anymore */
69597
69598- atomic_inc(&cp->in_pkts);
69599+ atomic_inc_unchecked(&cp->in_pkts);
69600 ip_vs_conn_put(cp);
69601 return ret;
69602 }
69603@@ -1613,7 +1613,7 @@ ip_vs_in(unsigned int hooknum, struct sk
69604 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
69605 pkts = sysctl_sync_threshold(ipvs);
69606 else
69607- pkts = atomic_add_return(1, &cp->in_pkts);
69608+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
69609
69610 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
69611 cp->protocol == IPPROTO_SCTP) {
69612diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c
69613--- linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c 2011-09-02 18:11:21.000000000 -0400
69614+++ linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c 2011-08-23 21:48:14.000000000 -0400
69615@@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service
69616 ip_vs_rs_hash(ipvs, dest);
69617 write_unlock_bh(&ipvs->rs_lock);
69618 }
69619- atomic_set(&dest->conn_flags, conn_flags);
69620+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
69621
69622 /* bind the service */
69623 if (!dest->svc) {
69624@@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct se
69625 " %-7s %-6d %-10d %-10d\n",
69626 &dest->addr.in6,
69627 ntohs(dest->port),
69628- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
69629+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
69630 atomic_read(&dest->weight),
69631 atomic_read(&dest->activeconns),
69632 atomic_read(&dest->inactconns));
69633@@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct se
69634 "%-7s %-6d %-10d %-10d\n",
69635 ntohl(dest->addr.ip),
69636 ntohs(dest->port),
69637- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
69638+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
69639 atomic_read(&dest->weight),
69640 atomic_read(&dest->activeconns),
69641 atomic_read(&dest->inactconns));
69642@@ -2284,6 +2284,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
69643 struct ip_vs_dest_user *udest_compat;
69644 struct ip_vs_dest_user_kern udest;
69645
69646+ pax_track_stack();
69647+
69648 if (!capable(CAP_NET_ADMIN))
69649 return -EPERM;
69650
69651@@ -2498,7 +2500,7 @@ __ip_vs_get_dest_entries(struct net *net
69652
69653 entry.addr = dest->addr.ip;
69654 entry.port = dest->port;
69655- entry.conn_flags = atomic_read(&dest->conn_flags);
69656+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
69657 entry.weight = atomic_read(&dest->weight);
69658 entry.u_threshold = dest->u_threshold;
69659 entry.l_threshold = dest->l_threshold;
69660@@ -3026,7 +3028,7 @@ static int ip_vs_genl_fill_dest(struct s
69661 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
69662
69663 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
69664- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
69665+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
69666 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
69667 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
69668 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
69669diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c
69670--- linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c 2011-07-21 22:17:23.000000000 -0400
69671+++ linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c 2011-08-23 21:47:56.000000000 -0400
69672@@ -648,7 +648,7 @@ control:
69673 * i.e only increment in_pkts for Templates.
69674 */
69675 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
69676- int pkts = atomic_add_return(1, &cp->in_pkts);
69677+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
69678
69679 if (pkts % sysctl_sync_period(ipvs) != 1)
69680 return;
69681@@ -794,7 +794,7 @@ static void ip_vs_proc_conn(struct net *
69682
69683 if (opt)
69684 memcpy(&cp->in_seq, opt, sizeof(*opt));
69685- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
69686+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
69687 cp->state = state;
69688 cp->old_state = cp->state;
69689 /*
69690diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c
69691--- linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c 2011-07-21 22:17:23.000000000 -0400
69692+++ linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c 2011-08-23 21:47:56.000000000 -0400
69693@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
69694 else
69695 rc = NF_ACCEPT;
69696 /* do not touch skb anymore */
69697- atomic_inc(&cp->in_pkts);
69698+ atomic_inc_unchecked(&cp->in_pkts);
69699 goto out;
69700 }
69701
69702@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
69703 else
69704 rc = NF_ACCEPT;
69705 /* do not touch skb anymore */
69706- atomic_inc(&cp->in_pkts);
69707+ atomic_inc_unchecked(&cp->in_pkts);
69708 goto out;
69709 }
69710
69711diff -urNp linux-3.0.4/net/netfilter/Kconfig linux-3.0.4/net/netfilter/Kconfig
69712--- linux-3.0.4/net/netfilter/Kconfig 2011-07-21 22:17:23.000000000 -0400
69713+++ linux-3.0.4/net/netfilter/Kconfig 2011-08-23 21:48:14.000000000 -0400
69714@@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
69715
69716 To compile it as a module, choose M here. If unsure, say N.
69717
69718+config NETFILTER_XT_MATCH_GRADM
69719+ tristate '"gradm" match support'
69720+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
69721+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
69722+ ---help---
69723+ The gradm match allows to match on grsecurity RBAC being enabled.
69724+ It is useful when iptables rules are applied early on bootup to
69725+ prevent connections to the machine (except from a trusted host)
69726+ while the RBAC system is disabled.
69727+
69728 config NETFILTER_XT_MATCH_HASHLIMIT
69729 tristate '"hashlimit" match support'
69730 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
69731diff -urNp linux-3.0.4/net/netfilter/Makefile linux-3.0.4/net/netfilter/Makefile
69732--- linux-3.0.4/net/netfilter/Makefile 2011-07-21 22:17:23.000000000 -0400
69733+++ linux-3.0.4/net/netfilter/Makefile 2011-08-23 21:48:14.000000000 -0400
69734@@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) +=
69735 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
69736 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
69737 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
69738+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
69739 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
69740 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
69741 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
69742diff -urNp linux-3.0.4/net/netfilter/nfnetlink_log.c linux-3.0.4/net/netfilter/nfnetlink_log.c
69743--- linux-3.0.4/net/netfilter/nfnetlink_log.c 2011-07-21 22:17:23.000000000 -0400
69744+++ linux-3.0.4/net/netfilter/nfnetlink_log.c 2011-08-23 21:47:56.000000000 -0400
69745@@ -70,7 +70,7 @@ struct nfulnl_instance {
69746 };
69747
69748 static DEFINE_SPINLOCK(instances_lock);
69749-static atomic_t global_seq;
69750+static atomic_unchecked_t global_seq;
69751
69752 #define INSTANCE_BUCKETS 16
69753 static struct hlist_head instance_table[INSTANCE_BUCKETS];
69754@@ -505,7 +505,7 @@ __build_packet_message(struct nfulnl_ins
69755 /* global sequence number */
69756 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
69757 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
69758- htonl(atomic_inc_return(&global_seq)));
69759+ htonl(atomic_inc_return_unchecked(&global_seq)));
69760
69761 if (data_len) {
69762 struct nlattr *nla;
69763diff -urNp linux-3.0.4/net/netfilter/nfnetlink_queue.c linux-3.0.4/net/netfilter/nfnetlink_queue.c
69764--- linux-3.0.4/net/netfilter/nfnetlink_queue.c 2011-07-21 22:17:23.000000000 -0400
69765+++ linux-3.0.4/net/netfilter/nfnetlink_queue.c 2011-08-23 21:47:56.000000000 -0400
69766@@ -58,7 +58,7 @@ struct nfqnl_instance {
69767 */
69768 spinlock_t lock;
69769 unsigned int queue_total;
69770- atomic_t id_sequence; /* 'sequence' of pkt ids */
69771+ atomic_unchecked_t id_sequence; /* 'sequence' of pkt ids */
69772 struct list_head queue_list; /* packets in queue */
69773 };
69774
69775@@ -272,7 +272,7 @@ nfqnl_build_packet_message(struct nfqnl_
69776 nfmsg->version = NFNETLINK_V0;
69777 nfmsg->res_id = htons(queue->queue_num);
69778
69779- entry->id = atomic_inc_return(&queue->id_sequence);
69780+ entry->id = atomic_inc_return_unchecked(&queue->id_sequence);
69781 pmsg.packet_id = htonl(entry->id);
69782 pmsg.hw_protocol = entskb->protocol;
69783 pmsg.hook = entry->hook;
69784@@ -870,7 +870,7 @@ static int seq_show(struct seq_file *s,
69785 inst->peer_pid, inst->queue_total,
69786 inst->copy_mode, inst->copy_range,
69787 inst->queue_dropped, inst->queue_user_dropped,
69788- atomic_read(&inst->id_sequence), 1);
69789+ atomic_read_unchecked(&inst->id_sequence), 1);
69790 }
69791
69792 static const struct seq_operations nfqnl_seq_ops = {
69793diff -urNp linux-3.0.4/net/netfilter/xt_gradm.c linux-3.0.4/net/netfilter/xt_gradm.c
69794--- linux-3.0.4/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
69795+++ linux-3.0.4/net/netfilter/xt_gradm.c 2011-08-23 21:48:14.000000000 -0400
69796@@ -0,0 +1,51 @@
69797+/*
69798+ * gradm match for netfilter
69799